# -*- coding: utf-8 -*-
import io
import sqlite3
import pickle
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:
    from collections import Mapping, MutableMapping
import numpy as np
from . import util
class Tensor(object):
"""``numpy.array`` with attr and id.
:param data: ``numpy.array`` or ``Mapping``
:param attr: ``dict`` object or ``None``
:param id: ``str``
"""
def __init__(self, data, attr=None, id=None, **kwargs):
super(Tensor, self).__init__()
super(Tensor, self).__setattr__('_data', data)
super(Tensor, self).__setattr__(
'_id', str(id) if id is not None else util.gen_id()
)
super(Tensor, self).__setattr__(
'_attr',
None if attr is None and len(kwargs) == 0 else
dict(attr if attr is not None else {}, **kwargs)
)
@property
def id(self):
"""get id of this object
"""
return self._id
@property
def data(self):
"""get data of this object
"""
return self._data
@property
def attr(self):
"""get attr of this object. If ``_attr`` is None, this method creates\
an empty dict.
"""
if self._attr is None:
self._attr = {}
return self._attr
def __getattr__(self, name):
"""direct access to ``numpy.array``\'s attributes
:param name: ``str``
"""
return getattr(self._data, name)
def __getitem__(self, key):
"""direct access to ``numpy.array``\'s indexing
:param key: any objects acceptable for ``numpy.array``\'s\
``__getitem__``
"""
return self._data.__getitem__(key)
def __setitem__(self, key, value):
"""direct access to ``numpy.array``\'s index assignation
:param key: any objects acceptable for ``numpy.array``\'s\
``__setitem__``
"""
return self._data.__setitem__(key, value)
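# Illustrative usage sketch (not part of the original module): shows how
# Tensor delegates attribute access and indexing to the wrapped array.
def _tensor_usage_sketch():
    t = Tensor(np.arange(6).reshape(2, 3), attr={"label": "demo"}, id="t1")
    assert t.shape == (2, 3)   # __getattr__ forwards to the numpy array
    assert t[0, 1] == 1        # __getitem__ forwards to the numpy array
    t[0, 1] = 42               # __setitem__ forwards to the numpy array
    return t.id, t.attr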
class Database(MutableMapping):
@property
def schema_version(self):
return '0'
def __init__(self, connection):
super(Database, self).__init__()
if isinstance(connection, sqlite3.Connection):
self.connection = connection
else:
self.connection = sqlite3.Connection(connection)
if not self.is_init():
self.__init_tables()
def __init_tables(self):
cur = self.connection.cursor()
cur.execute(
'CREATE TABLE metadata (key TEXT, value TEXT)'
)
cur.execute(
'CREATE TABLE tensor (' +
'id TEXT(22) PRIMARY KEY, ' +
'data BLOB, ' +
'attr BLOB)'
)
cur.executemany(
'INSERT INTO metadata (key, value) VALUES (?, ?)',
(('schema_version', self.schema_version),
('initialized_at', util.now()))
)
self.connection.commit()
def is_init(self):
"""return ``True`` if this object is initialized with current schema\
and return ``False`` otherwise.
"""
cur = self.connection.cursor()
masterdata = list(cur.execute(
'SELECT * FROM sqlite_master WHERE name="metadata"'
))
if len(masterdata) == 0:
return False
schema_version = list(cur.execute(
'SELECT value FROM metadata ' +
'WHERE key="schema_version"'
))
if len(schema_version) > 0 and \
schema_version[0][0] == self.schema_version:
return True
return False
def save(self, tensor, commit=True):
"""update or insert ``tensor`` into the table.
:param tensor: ``Tensor`` object
"""
cur = self.connection.cursor()
rec = list(cur.execute(
'SELECT id FROM ' +
'tensor WHERE id=?', (tensor.id, )
))
if len(rec) != 0:
cur.execute(
'UPDATE tensor SET data=?, attr=? WHERE id=?',
self.serialize(tensor)
)
else:
cur.execute(
'INSERT INTO tensor (data, attr, id) VALUES (?, ?, ?)',
self.serialize(tensor)
)
if commit:
self.connection.commit()
def erase(self, tensor_id, commit=True):
"""delete tensor whose id is ``tensor_id`` from the table.
:param tensor_id: ``str`` or ``Tensor`` object
"""
if isinstance(tensor_id, Tensor):
return self.erase(tensor_id.id, commit)
cur = self.connection.cursor()
cur.execute('DELETE FROM tensor WHERE id=?', (tensor_id, ))
if commit:
self.connection.commit()
def __getitem__(self, key):
if not isinstance(key, (str, bytes)):
return self.__getitem__(str(key))
d = self.connection.cursor().execute(
'SELECT data, attr, id FROM tensor WHERE id=?', (key, )
).fetchone()
if d is None:
raise KeyError(key)
return self.deserialize(d)
def __setitem__(self, key, value):
if isinstance(value, Tensor):
return self.save(Tensor(data=value.data, attr=value.attr, id=key))
return self.save(Tensor(data=np.array(value), attr=None, id=key))
def __delitem__(self, key):
return self.erase(key)
def __iter__(self):
for x in self.connection.cursor().execute(
'SELECT id FROM tensor'):
yield x[0]
def __len__(self):
return self.connection.cursor().execute(
'SELECT COUNT(*) FROM tensor'
).fetchone()[0]
@classmethod
def serialize(cls, tensor):
return (cls.serialize_array(tensor._data),
cls.serialize_attr(tensor._attr),
tensor._id)
@classmethod
def deserialize(cls, record):
return Tensor(
data=cls.deserialize_array(record[0]),
attr=cls.deserialize_attr(record[1]),
id=record[2]
)
@classmethod
def serialize_array(cls, data):
s = io.BytesIO()
if isinstance(data, Mapping):
            np.savez(s, **data)
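# Hedged, standalone sketch of the BytesIO round-trip that serialize_array
# relies on, using only documented numpy calls (np.save / np.savez / np.load);
# the arrays below are made up for illustration.
def _bytesio_roundtrip_sketch():
    buf = io.BytesIO()
    np.save(buf, np.arange(4))    # a single array is stored as .npy bytes
    buf.seek(0)
    arr = np.load(buf)            # -> array([0, 1, 2, 3])
    buf2 = io.BytesIO()
    np.savez(buf2, a=np.zeros(2), b=np.ones(3))   # a Mapping becomes .npz
    buf2.seek(0)
    npz = np.load(buf2)           # NpzFile, access members via npz['a']
    return arr, npz['a'], npz['b']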
#!/usr/bin/env python
# encoding=utf-8
import json
import os
from collections import Counter
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
# configuration file
conf = Config()
class Vocabulary(object):
def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
self.voc2id = {}
self.id2voc = {}
self.unk = unk
self.pad = pad
self.max_len = max_len
        self.allow_unk = allow_unk
        self.max_num_word = -1  # set by fit(); negative means "use all words"
with open(meta_file, encoding='utf-8') as f:
for i, line in enumerate(f):
line = convert_to_unicode(line.strip("\n"))
self.voc2id[line] = i
self.id2voc[i] = line
self.size = len(self.voc2id)
self.oov_num = self.size + 1
def fit(self, words_list):
"""
:param words_list: [[w11, w12, ...], [w21, w22, ...], ...]
:return:
"""
word_lst = []
word_lst_append = word_lst.append
for words in words_list:
if not isinstance(words, list):
print(words)
continue
for word in words:
word = convert_to_unicode(word)
word_lst_append(word)
word_counts = Counter(word_lst)
if self.max_num_word < 0:
self.max_num_word = len(word_counts)
sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
self.max_num_word = len(sorted_voc)
self.oov_index = self.max_num_word + 1
self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
return self
def _transform2id(self, word):
word = convert_to_unicode(word)
if word in self.voc2id:
return self.voc2id[word]
elif self.allow_unk:
return self.voc2id[self.unk]
else:
print(word)
raise ValueError("word:{} Not in voc2id, please check".format(word))
def _transform_seq2id(self, words, padding=0):
out_ids = []
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
for w in words:
out_ids.append(self._transform2id(w))
if padding and self.max_len:
while len(out_ids) < self.max_len:
out_ids.append(0)
return out_ids
def _transform_intent2ont_hot(self, words, padding=0):
        # convert multi-label intents to one-hot
out_ids = np.zeros(self.size, dtype=np.float32)
words = convert_to_unicode(words)
for w in words:
out_ids[self._transform2id(w)] = 1.0
return out_ids
def _transform_seq2bert_id(self, words, padding=0):
out_ids, seq_len = [], 0
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
seq_len = len(words)
        # insert [CLS], [SEP]
out_ids.append(self._transform2id("[CLS]"))
for w in words:
out_ids.append(self._transform2id(w))
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids = [0 for _ in out_ids]
return out_ids, mask_ids, seg_ids, seq_len
@staticmethod
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
out_ids, seg_ids, seq_len = [], [1], 0
seq1 = [x for x in convert_to_unicode(seq1)]
seq2 = [x for x in convert_to_unicode(seq2)]
        # truncate
self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        # insert [CLS], [SEP]
out_ids.append(self._transform2id("[CLS]"))
for w in seq1:
out_ids.append(self._transform2id(w))
seg_ids.append(0)
out_ids.append(self._transform2id("[SEP]"))
seg_ids.append(0)
for w in seq2:
out_ids.append(self._transform2id(w))
seg_ids.append(1)
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids.append(0)
return out_ids, mask_ids, seg_ids, seq_len
def transform(self, seq_list, is_bert=0):
if is_bert:
return [self._transform_seq2bert_id(seq) for seq in seq_list]
else:
return [self._transform_seq2id(seq) for seq in seq_list]
def __len__(self):
return len(self.voc2id)
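# Hedged usage sketch: build a tiny vocab file on the fly and exercise the id
# transforms; the file contents below are made up for illustration.
def _vocabulary_usage_sketch():
    import tempfile
    tmp = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf-8')
    tmp.write("$PAD$\n$UNK$\na\nb\n")
    tmp.close()
    voc = Vocabulary(tmp.name, max_len=4, allow_unk=1)
    ids = voc._transform_seq2id("ab", padding=1)   # expected [2, 3, 0, 0]
    os.remove(tmp.name)
    return ids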
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def gen_word_set(file_path, out_path='./data/words.txt'):
word_set = set()
with open(file_path, encoding='utf-8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
            if len(spline) < 5:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr = [prefix, title]
query_pred = json.loads(query_pred)
for w in prefix:
word_set.add(w)
for each in query_pred:
for w in each:
word_set.add(w)
    with open(out_path, 'w', encoding='utf-8') as o:
for w in word_set:
o.write(w + '\n')
pass
def convert_word2id(query, vocab_map):
ids = []
for w in query:
if w in vocab_map:
ids.append(vocab_map[w])
else:
ids.append(vocab_map[conf.unk])
while len(ids) < conf.max_seq_len:
ids.append(vocab_map[conf.pad])
return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
bow_ids = np.zeros(conf.nwords)
for w in query:
if w in vocab_map:
bow_ids[vocab_map[w]] += 1
else:
bow_ids[vocab_map[conf.unk]] += 1
return bow_ids
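# Hedged sketch of the pad-then-truncate pattern used by convert_word2id,
# with explicit arguments instead of the global Config object (the field
# names max_seq_len/unk/pad mirror conf and are assumptions here).
def _word2id_sketch(query, vocab_map, max_seq_len=5, unk="$UNK$", pad="$PAD$"):
    ids = [vocab_map.get(w, vocab_map[unk]) for w in query]
    ids += [vocab_map[pad]] * max(0, max_seq_len - len(ids))
    return ids[:max_seq_len]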
def get_data(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
    :return: dict with keys 'query', 'query_len', 'doc_pos', 'doc_pos_len', 'doc_neg', 'doc_neg_len'
"""
data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
            if len(spline) < 5:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr, cur_len = [], []
query_pred = json.loads(query_pred)
            # keep only 4 negative samples per query
for each in query_pred:
if each == title:
continue
cur_arr.append(convert_word2id(each, conf.vocab_map))
each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
cur_len.append(each_len)
if len(cur_arr) >= 4:
data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_neg'].extend(cur_arr[:4])
data_map['doc_neg_len'].extend(cur_len[:4])
pass
return data_map
def get_data_siamese_rnn(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
    :return: [[prefix_seq, title_seq, label]], shape = [n, 3]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
            if len(spline) < 5:
continue
prefix, _, title, tag, label = spline
prefix_seq = convert_word2id(prefix, conf.vocab_map)
title_seq = convert_word2id(title, conf.vocab_map)
data_arr.append([prefix_seq, title_seq, int(label)])
return data_arr
def get_data_bow(file_path):
"""
gen datasets, convert word into word ids.
:param file_path:
    :return: [[prefix_bow, title_bow, label]], shape = [n, 3]
"""
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
            if len(spline) < 5:
continue
prefix, _, title, tag, label = spline
prefix_ids = convert_seq2bow(prefix, conf.vocab_map)
title_ids = convert_seq2bow(title, conf.vocab_map)
data_arr.append([prefix_ids, title_ids, int(label)])
return data_arr
def trans_lcqmc(dataset):
"""
    convert LCQMC examples to id sequences and record text lengths
"""
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
t1_ids = convert_word2id(t1, conf.vocab_map)
t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
t2_ids = convert_word2id(t2, conf.vocab_map)
t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
# t2_len = len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
# out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", | mean(text_len) | numpy.core.fromnumeric.mean |
import sys
from typing import Tuple
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker, cm
from scipy import optimize
import multiprocessing as mp
import time
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
# FUNCTIONS AND VARS
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
# Display additional info for debugging
DEBUG_MODE = False
# File with all the data
fileName = "SMBH/Data/OrbitData2017.txt"
# Output file for MCD
outputFile = "SMBH/Data/Output.txt"
# store last orbit data in file
OrbitFileForewrd = "SMBH/Orbit/foreward.txt"
OrbitFileBackwrd = "SMBH/Orbit/backward.txt"
# Gravitational Constant in astronomy Units
GLOB_G = 4.30091E-3
# pc/yr to km/s conversion factor
GLOB_PcYrKmS = 997813.106
# mas to radians conversion factor
GLOB_masToRad = 4.8481368110954E-9
# 1 second to year conversion factor
GLOB_SecToYr = 3.17098E-8
# pc to km conversion factor
GLOB_PcToKm = 3.086E13
# speed of light in km/s
GLOB_c = 299792.458
#counter
GLOB_counter = 0
# Global Bounds of SGR A* Position in mas -- (del x, del y , del z)
GLOB_SGRA_Pos = np.array([0.2, 0.2, 0.2])
# IMPORT AND DATA HANDLING
class FitContainer():
'''
Container for all the Data needed for a Fit
'''
success = False
Mass = -1
Distance = -1
initR = None
initV = None
PosPath = None
VPath = None
ErrRA = None
ErrDE = None
ErrVR = None
OrbitNumber = 0
OrbElem = None
PositionArray = None
VelocityArray = None
def __init__(self, ParVec, _oN = 1, success = True, _orb=None, _PosArr = None, _VelArr = None):
Pars = ParVecToParams(ParVec)
self.Mass = Pars[0]
self.Distance = Pars[3]
self.initR = Pars[1]
self.initV = Pars[2]
        self.OrbitNumber = _oN  # number of orbits in front of the index point
self.success = success
self.OrbElem = _orb
self.PositionArray = _PosArr
self.VelocityArray = _VelArr
if type(_PosArr) == np.ndarray:
_tmp = PosRealToRad(self.PositionArray, self.Distance)
self.PosPath = np.array( [ _tmp[:,0], _tmp[:,1] ] )
self.VPath = np.array( self.VelocityArray[:,2] )
def initErrorData(self, _RA, _DE, _VR):
self.ErrRA = _RA
self.ErrDE = _DE
self.ErrVR = _VR
def getChi2(self, bReduced:bool=False) -> float:
if type(self.ErrRA) == list:
lenPos = len(self.ErrRA[3])
lenVel = len(self.ErrVR[3])
NPos = lenPos / (lenPos + lenVel)
NVel = lenVel / (lenPos + lenVel)
if bReduced:
_t = (NPos*self.ErrRA[2] + NPos*self.ErrDE[2] + NVel*self.ErrVR[2])
#_t = (self.ErrRA[2] + self.ErrDE[2] + self.ErrVR[2])
return _t / (2*lenPos + lenVel)
else:
return (NPos*self.ErrRA[2] + NPos*self.ErrDE[2] + NVel*self.ErrVR[2])
#return (self.ErrRA[2] + self.ErrDE[2] + self.ErrVR[2])
else:
return 1E10
def returnParVec(self) -> list:
return [self.Mass, *self.initR, *self.initV, self.Distance]
class DataContainer():
'''
Stores the Data from a Star
'''
TimeP = None
TimeV = None
RA = None
eRA = None
DE = None
eDE = None
VR = None
eVR = None
def __init__(self, _stNr, _SData):
'''
_stNr -- Star Number
_SData -- Star Data, already extracted from Total Data (see readTable)
'''
if _SData:
self.StarNr = _stNr
scale = np.sqrt(5)/2.60335 # for weighted
#scale = np.sqrt(5)/2.192 # for unweighted
#positional data
self.TimeP = np.array([x["time"] for x in _SData["pos"]] )
self.RA = np.array([x["RA"] for x in _SData["pos"]] )
self.DE = np.array([x["DE"] for x in _SData["pos"]] )
self.eRA = np.array([x["e_RA"] * scale for x in _SData["pos"]] )
self.eDE = np.array([x["e_DE"] * scale for x in _SData["pos"]] )
#RVel data
self.VR = np.array([x["RVel"] for x in _SData["rad"]] )
self.eVR = np.array([x["e_RVel"] * scale for x in _SData["rad"]] )
self.TimeV = np.array([x["time"] for x in _SData["rad"]] )
#print("len of Data N = ", len(self.RA) + len(self.DE) + len(self.VR))
# create and return a copy of this object
def copy(self):
_t = DataContainer(self.StarNr, None)
_t.TimeP = self.TimeP
_t.RA = self.RA
_t.DE = self.DE
_t.eRA = self.eRA
_t.eDE = self.eDE
_t.VR = self.VR
_t.eVR = self.eVR
_t.TimeV = self.TimeV
return _t
class OrbitElem():
def __init__(self, _a, _e, _omega, _LAN, _i, _M, _T, _nu):
"""
Container for saving orbital Elements
Parameters
----------
        _a : semi-major axis
        _e : eccentricity
        _omega : argument of periapsis
        _LAN : longitude of ascending node
        _i : inclination
        _M : mean anomaly
        _T : period
        _nu : true anomaly
"""
self.MayAxis = _a
self.Ecc = _e
self.ArgPeri = _omega
self.LAN = _LAN
self.Incl = _i
self.MeanM = _M
self.Period = _T
self.TAnom = _nu
def readTable(_fName:str) -> tuple:
'''
reads the file. Returns (Data in rows, Header in the form [1st data, 2nd data,...])
Format: (Data[row][point], Header[point][more info])
'''
_file = open(_fName, 'r')
_data = _file.readlines()
_file.close()
# this is where the header starts
_index = 10
_data_header = []
#------------------------------------------------
# read the header
#------------------------------------------------
while True:
if _data[_index][0] == "-":
break
#this is a dataline
_line = _data[_index]
_byte = int(_line[0:4].strip())
_byte_end = int(_line[5:8].strip())
_format = _line[9:15].strip()
_unit = _line[16:23].strip()
_label = _line[24:33].strip()
_desc = _line[34:].strip()
_data_header.append([])
_data_header[_index - 10].append(_byte)
_data_header[_index - 10].append(_byte_end)
_data_header[_index - 10].append(_format)
_data_header[_index - 10].append(_unit)
_data_header[_index - 10].append(_label)
_data_header[_index - 10].append(_desc)
_index += 1
# this is where the data starts
_index = 134
_acData = []
#------------------------------------------------
# read the data
#------------------------------------------------
while True:
#file end
if not _data[_index]:
break
_line = _data[_index]
_acData.append([])
for i in range(len(_data_header)):
_acData[_index-134].append(_line[ _data_header[i][0] - 1: _data_header[i][1] ].strip() )
if _index+1 < len(_data):
_index += 1
else:
break
return (_acData, _data_header)
def EmptyCheck(_data:list, _index:int, _indexList:list) -> bool:
"""
    check whether any element (indices given by _indexList) in _data is a
    non-empty string; returns True if so. Used by return_StarExistingData.
"""
for i in _indexList:
if ( _data[0][_index][i] != '' ):
return True
return False
def return_StarExistingData(_data:list, StarNr:int) -> dict:
"""
return data for specific Star
IN: (raw_data, (int) Star Number)
OUT: StarData
FORMAT: [ Data["pos"], Data["rad"] ]
Data["pos"]..."time", "RA", "e_RA", "DE", "e_DE"
Data["rad"]..."time", "RVel", "e_RVel"
"""
_header = _data[1]
firstStarElem = "oRA-S" + str(StarNr)
_index = -1
for i in range(len(_header)):
#is label the same
if _header[i][4] == firstStarElem:
_index = i
break
#wrong label => wrong star number
if _index < 0:
        return {}
#_StarData = []
    #dictionary containing positional data and radial data separately
_StarData = dict( [ ("pos", []), ("rad", []) ] )
#form a dict from the data
#FORMAT: time, RA, e_RA, DE, e_DE // RVel, e_RVel
for i in range(len(_data[0])):
#_data[0][i] is the i-th row of data; [1] is the flag position
#check flag; a = position
if (_data[0][i][1] == "a"):
#is the star data not empty; _index is starting index of star data
#check for all positional data
if (EmptyCheck(_data, i, [ _index, _index+1, _index+2,_index+3 ] ) ):
_StarData["pos"].append( dict( [
("time", float(_data[0][i][0])), #date
("RA", float(_data[0][i][_index])), #Right ascention
("e_RA", float(_data[0][i][_index+1])), #Error RA
("DE", float(_data[0][i][_index+2])), #Declination
("e_DE", float(_data[0][i][_index+3])) #Error DE
] ) )
#check if rad flag
elif (_data[0][i][1] == "rv"):
if (EmptyCheck(_data, i, [_index+4,_index+5] ) ):
_StarData["rad"].append( dict( [
("time", float(_data[0][i][0])), #date
("RVel", float(_data[0][i][_index+4])), #radial velocity
("e_RVel", float(_data[0][i][_index+5])) #Error RVel
]))
return _StarData
# ORBITAL ELEMENTS
def getT(r0:np.ndarray, v0:np.ndarray, _M:float) -> float:
'''
returns the Period of one orbit for given state vector and Mass
'''
_a = 1/( 2/np.linalg.norm(r0) - np.linalg.norm(v0)**2/(GLOB_G * _M) ) # a in pc
_t = 2*np.pi * np.sqrt( (_a**3)/(GLOB_G*_M) ) # Units = sec * pc / km
return _t * GLOB_SecToYr * GLOB_PcToKm # convert to year plus additional length factor
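# Hedged example: period for an illustrative state vector; the numbers are
# made up and only demonstrate the expected units (pc, km/s, solar masses).
def _getT_example():
    r0 = np.array([0.0, 4.0e-3, 0.0])       # position in pc
    v0 = np.array([-450.0, 160.0, -420.0])  # velocity in km/s
    return getT(r0, v0, 4.0e6)              # period in yr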
def getOrbitalElements(_parVec:list) -> OrbitElem:
"""
Returns all Orbital Elements and the Period, packed into a data class
Parameters
----------
_parVec : Parameter Vector for current orbit
Returns
-------
OrbitalElem Object
"""
Pars = ParVecToParams(_parVec)
M = Pars[0]
r0 = Pars[1]
v0 = Pars[2]
    # specific angular momentum vector
h = np.cross(r0, v0)
# eccentricity vector
e = np.cross(v0, h) / (GLOB_G*M) - r0/np.linalg.norm(r0)
# eccentricity
e_norm = np.linalg.norm(e)
    # node vector: n = k x h (k is the unit vector along z)
    n = np.array([-h[1], h[0], 0])
# true anomaly
nu = np.arccos( np.dot(e, r0) / (e_norm * np.linalg.norm(r0)) )
if np.dot(r0, v0) < 0:
nu = 2*np.pi - nu
# inclination
i = np.arccos(h[2] / np.linalg.norm(h))
# eccentric Anomaly
E = 2* np.arctan( np.tan(nu/2) / ( np.sqrt( (1+e_norm)/(1-e_norm) ) ) )
# LAN
LAN = np.arccos( n[0] / np.linalg.norm(n) )
if n[1] < 0:
LAN = 2*np.pi - LAN
# argument of periapsis
omega = np.arccos( np.dot(n, e)/ (np.linalg.norm(n) * e_norm) )
if e[2] < 0:
omega = 2*np.pi - omega
# mean anomaly
MeanM = E - e_norm*np.sin(E)
    # semi-major axis
a = 1/( 2/np.linalg.norm(r0) - np.linalg.norm(v0)**2/(GLOB_G * M) )
_t = 2*np.pi * np.sqrt( (np.clip(a, 0, a)**3)/(GLOB_G*M) ) # Units = sec * pc / km
T = _t * GLOB_SecToYr * GLOB_PcToKm
_OE = OrbitElem(a, e_norm, omega * 180 / np.pi, LAN * 180 / np.pi, i * 180 / np.pi, MeanM * 180 / np.pi, T, nu)
return _OE
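# Hedged example: orbital elements from a made-up parameter vector
# [M, x, y, z, vx, vy, vz, d]; values chosen only to give a bound orbit.
def _orbital_elements_example():
    parVec = [4.0e6, 0.0, 4.0e-3, 0.0, -450.0, 160.0, -420.0, 8000.0]
    oe = getOrbitalElements(parVec)
    return oe.Ecc, oe.Period, oe.Incl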
def OE_Essentials(_parVec:list) -> OrbitElem:
"""
Only calculate e and T to be bounds checked for fit
Parameters
----------
_parVec : Parameter Vector for current orbit
Returns
-------
OrbitalElem Object
"""
Pars = ParVecToParams(_parVec)
M = Pars[0]
r0 = Pars[1]
v0 = Pars[2]
# momentum vector
h = np.cross(r0, v0)
# eccentricity vector
e = np.cross(v0, h) / (GLOB_G*M) - r0/np.linalg.norm(r0)
# eccentricity
e_norm = np.linalg.norm(e)
    # semi-major axis
a = 1/( 2/np.linalg.norm(r0) - np.linalg.norm(v0)**2/(GLOB_G * M) )
_t = 2*np.pi * np.sqrt( (np.clip(a, 0, a)**3)/(GLOB_G*M) ) # Units = sec * pc / km
T = _t * GLOB_SecToYr * GLOB_PcToKm
_OE = OrbitElem(a, e_norm, 0, 0, 0, 0, T, 0)
return _OE
# UTILITY
def PosRadToReal(_r:np.ndarray, _dist:float) -> np.ndarray:
'''
converts the first 2 radial elements to real distance, given the distance
returns position vector in pc
'''
return _r*_dist*GLOB_masToRad
def RadToReal(_x:float, _dist:float) -> float:
'''
return distance in pc for one coordinate
'''
return _x*_dist*GLOB_masToRad
def PosRealToRad(_r:np.ndarray, _dist:float) -> np.ndarray:
'''
converts the real position to radial position in the first 2 elements.
Used for plotting only
    returns position vector with units (mas, mas, pc)
'''
_t = np.array([ _dist*GLOB_masToRad, _dist*GLOB_masToRad, 1 ])
return _r/_t
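# Hedged example: mas <-> pc round trip at an assumed distance of 8000 pc.
# PosRealToRad converts only the first two components back to mas.
def _unit_conversion_example():
    d = 8000.0
    r_mas = np.array([10.0, -5.0, 0.0])   # (RA [mas], DE [mas], z)
    r_pc = PosRadToReal(r_mas, d)         # all components scaled to pc
    back = PosRealToRad(r_pc, d)          # x, y back in mas; z left in pc
    return r_pc, back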
def potential(r:np.ndarray,v:np.ndarray,_M:float, r_SGRA:np.ndarray=np.array([0,0,0])) -> np.ndarray:
"""
return Kepler acceleration
Parameters
----------
r : [vector]
position of particle to evaluate potential at
_M : [scalar]
Mass of central object
Returns
-------
    acceleration vector (Kepler)
"""
    # true distance from star to Sgr A*
dist = r - r_SGRA
return -(GLOB_G*_M*dist) / (np.linalg.norm(dist)**3)
def potentialSchwarz(r:np.ndarray,v:np.ndarray,_M:float, r_SGRA:np.ndarray=np.array([0,0,0])) -> np.ndarray:
"""
return the Schwarzschild acceleration
Parameters
----------
r : [vector]
position of particle to evaluate potential at
v : [vector]
velocity of particle
_M : [scalar]
Mass of central object
Returns
-------
    Schwarzschild acceleration: Kepler term plus relativistic correction
"""
h = np.cross(r,v) # specific angular momentum
kepl = potential(r,v,_M)
Schw = (3 * GLOB_G * _M * np.dot(h,h) * r) / (GLOB_c**2 * np.linalg.norm(r)**5)
return kepl + Schw
def VerletStep(h:float,r0:np.ndarray,v0:np.ndarray,f0:np.ndarray,_M:float, r_SGRA:np.ndarray=np.array([0,0,0])) -> np.ndarray:
"""
Orbital Integration using the Verlet Algorithm
Parameters
----------
h : [scalar]
stepsize -> delta t
r0 : [vector]
position of particle from last evaluation
v0 : [vector]
velocity of particle from last evaluation
    f0 : [vector]
        acceleration from the last evaluation step
    _M : [scalar]
        Mass of central object
    r_SGRA : [vector], optional
        position of Sgr A*, defaults to the origin
Returns
-------
[r1, v1, f1]
        position, velocity and acceleration of the new step
"""
pp = np.add(v0, h/2*f0) # 1/2 Delta velocity
r1 = np.add(r0, h*pp) # new position = r0 + del v*del t
f1 = potential(r1,v0,_M, r_SGRA) # new potential at new position
v1 = np.add(pp, h/2*f1) # new velocity = v0 + 1/2 del a0*del t + 1/2 del a1*del t
return np.array([r1,v1,f1])
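# Hedged sketch: a few Verlet steps on a made-up state vector, mirroring the
# integration loop in getOrbit (stepsize and state values are illustrative).
def _verlet_sketch(n_steps=3, h=1e-8):
    M = 4.0e6
    r = np.array([0.0, 4.0e-3, 0.0])
    v = np.array([-450.0, 160.0, -420.0])
    f = potential(r, v, M)                # initial acceleration
    for _ in range(n_steps):
        r, v, f = VerletStep(h, r, v, f, M)
    return r, v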
def VerletStepSchwarz(h:float,r0:np.ndarray,v0:np.ndarray,f0:np.ndarray,_M:float, r_SGRA:np.ndarray=np.array([0,0,0])) -> np.ndarray:
"""
Orbital Integration using the Verlet Algorithm
Parameters
----------
h : [scalar]
stepsize -> delta t
r0 : [vector]
position of particle from last evaluation
v0 : [vector]
velocity of particle from last evaluation
    f0 : [vector]
        acceleration from the last evaluation step
    _M : [scalar]
        Mass of central object
    r_SGRA : [vector], optional
        position of Sgr A*, defaults to the origin
Returns
-------
[r1, v1, f1]
        position, velocity and acceleration of the new step
"""
pp = np.add(v0, h/2*f0) # 1/2 Delta velocity
r1 = np.add(r0, h*pp) # new position = r0 + del v*del t
f1 = potentialSchwarz(r1,pp,_M) # new potential at new position
v1 = np.add(pp, h/2*f1) # new velocity = v0 + 1/2 del a0*del t + 1/2 del a1*del t
return np.array([r1,v1,f1])
def returnDataError(rData:np.ndarray, rDataErr:np.ndarray, rTime:np.ndarray, Fake:np.ndarray, fTimeEnd:float) -> list:
"""
    evaluates how much the simulated (fake) data deviates from the measured data.
    Both must begin at the same time for this to work.
Parameters
----------
rData : np.ndarray
real Data to compare Fake Data against
rDataErr : np.ndarray
Error for real Data, used in chi calculation
rTime : np.ndarray
Timestamps for all real Data points
Fake : np.ndarray
Fake Data points that will be compared to real Data
fTimeEnd : float
Total End Time of Fake Data, this function will create its own time array based on this value
Returns
-------
[ x_time, y_UsedData, chi^2 value]
"""
# create timing for fake data
fakeTimeline = np.linspace(0,fTimeEnd, len(Fake))
newTimeOfFake = np.empty(len(rTime))
newValues = np.empty(len(rTime))
j = 0
# determine closest fakeTime for every measured timestamp
# if fake orbit shorter than measured time => last measured points get ignored
# if fake orbit longer than measured time => last fake points get ignored
for i in range(len(rTime)):
for k in range(j, len(fakeTimeline)):
if (fakeTimeline[k] >= rTime[i]):
newTimeOfFake[i] = fakeTimeline[k]
newValues[i] = Fake[k]
j = k
break
chi2 = ((rData - newValues)/rDataErr)**2
return [newTimeOfFake, newValues, np.sum( chi2 ), chi2]
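# Hedged sketch on made-up data: the fake curve is resampled at the first
# fake timestamp >= each measurement time before chi^2 is computed.
def _data_error_sketch():
    rTime = np.array([0.0, 0.5, 1.0])
    rData = np.array([0.0, 0.5, 1.0])
    rErr = np.full(3, 0.1)
    fake = np.linspace(0.0, 1.0, 101)     # dense fake data over [0, 1]
    return returnDataError(rData, rErr, rTime, fake, 1.0)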
def returnCombinedError(StarData:DataContainer, FitData:FitContainer, _in, redshiftCorr:bool = False) -> float:
"""
combines all measurement errors
Parameters
----------
StarData : DataContainer
The Star Data
FitData : FitContainer
The Fit Data, prior to any Error Calculation, Error will be overwritten
_in : [_index_R, _index_V]
Index point of starting data
redshiftCorr : bool
use redshift correction in error calculation? Only for Schwarzschild potential
Returns
-------
chi^2 value for current parameters
"""
if FitData.success:
# create timing for fake data
_eT = StarData.TimeP[_in[0]] - StarData.TimeP[0] + FitData.OrbitNumber * FitData.OrbElem.Period
# error on every measurement
Err_RA = returnDataError(StarData.RA, StarData.eRA, StarData.TimeP - StarData.TimeP[0], FitData.PosPath[0], _eT)
Err_DE = returnDataError(StarData.DE, StarData.eDE, StarData.TimeP - StarData.TimeP[0], FitData.PosPath[1], _eT)
# rad vel points need to be shifted by the same amount as the position data for consistency
if redshiftCorr:
fakeTimeline = np.linspace(0,_eT, len(FitData.VPath))
j = 0
rTime = StarData.TimeV - StarData.TimeP[0]
#newVR_Timeline = np.empty(len(rTime))
LengthAtVR = np.empty(len(rTime))
newFakeVR = np.empty(len(rTime))
for i in range(len(rTime)):
for k in range(j, len(fakeTimeline)):
if (fakeTimeline[k] >= rTime[i]):
#newVR_Timeline[i] = fakeTimeline[k]
LengthAtVR[i] = np.linalg.norm(FitData.PositionArray[k]) #FitData.PositionArray[k][2] #
newFakeVR[i] = FitData.VPath[k]
j = k
break
PN_VR = StarData.VR - getGravRedshift(FitData.Mass, LengthAtVR)
_chi2 = ((PN_VR - newFakeVR)/StarData.eVR)**2
Err_Vz = [StarData.TimeV - StarData.TimeP[0], PN_VR, np.sum( _chi2 ), _chi2]
else:
Err_Vz = returnDataError(StarData.VR, StarData.eVR, StarData.TimeV - StarData.TimeP[0], FitData.VPath, _eT)
lenPos = len(StarData.RA)
lenVel = len(StarData.VR)
NPos = lenPos / (lenPos + lenVel)
NVel = lenVel / (lenPos + lenVel)
#print("len: ", len(Err_RA[3]) + len(Err_DE[3]) + len(Err_Vz[3]))
FitData.initErrorData(Err_RA, Err_DE, Err_Vz) # save individual errors in FitData
# chi^2 value
#chi2 = (Err_RA[2] + Err_DE[2] + Err_Vz[2])
chi2 = (NPos * Err_RA[2] + NPos * Err_DE[2] + NVel * Err_Vz[2])
#Nlen = len(Err_RA[3]) + len(Err_DE[3]) + len(Err_Vz[3])
#chi2 = chi2/Nlen
return chi2
else:
return 1E10
def returnCombinedErrorFromFile(SD:DataContainer, FitData:FitContainer, _in) -> float:
_OFile = open(OrbitFileForewrd, 'r')
_line = _OFile.readline()
NumberLines = -2
StartBackwards = -1
while _line:
NumberLines += 1
if _line[0] == '#' and NumberLines > 0:
StartBackwards = NumberLines
_line = _OFile.readline()
_OFile.close()
_OFile = open(OrbitFileForewrd, 'r')
_line = _OFile.readline()
chi2RA = 0
chi2DE = 0
chi2VR = 0
fCount = -1
PositionRealTime = SD.TimeP - SD.TimeP[0]
VelocityRealTime = SD.TimeV - SD.TimeP[0]
# end of time
_eT = SD.TimeP[_in[0]] - SD.TimeP[0] + FitData.OrbitNumber * FitData.OrbElem.Period
    # time from the index point to the end of the data
fakeTimeline = np.linspace(SD.TimeP[_in[0]] - SD.TimeP[0], _eT, StartBackwards - 1)
fakeTimelineBack = np.linspace(0, SD.TimeP[_in[0]] - SD.TimeP[0], NumberLines - StartBackwards)
fakeTimelineBack = np.flip(fakeTimelineBack)
rUsedF = []
vUsedF = []
RAIndex = 0
VRIndex = 0
count = 1
# forward
while _line:
if count > StartBackwards:
break
count += 1
_t = _line.strip()
_line = _OFile.readline()
if _t[0] != '#':
_t = _t.split(" ")
_t = [float(x) for x in _t]
r = np.array(_t[:3])
v = np.array(_t[3:])
if fakeTimeline[count-1] >= PositionRealTime[RAIndex]:
rUsedF.append(r)
RAIndex = count - 1
if fakeTimeline[count - 1] >= VelocityRealTime[VRIndex]:
vUsedF.append(v)
VRIndex = count - 1
_OFile.close()
_OFile = open(OrbitFileForewrd, 'r')
_line = _OFile.readline()
count = 1
rUsedB = []
vUsedB = []
while _line:
if count < StartBackwards:
_line = _OFile.readline()
else:
_t = _line.strip()
_line = _OFile.readline()
_t = _t.split(" ")
_t = [float(x) for x in _t]
r = np.array(_t[:3])
v = np.array(_t[3:])
if fakeTimeline[count-1] >= PositionRealTime[RAIndex]:
rUsedB.append(r)
RAIndex = count - 1
if fakeTimeline[count - 1] >= VelocityRealTime[VRIndex]:
vUsedB.append(v)
VRIndex = count - 1
if FitData.success:
# create timing for fake data
_eT = SD.TimeP[_in[0]] - SD.TimeP[0] + FitData.OrbitNumber * FitData.OrbElem.Period
# error on every measurement
Err_RA = returnDataError(SD.RA, SD.eRA, SD.TimeP - SD.TimeP[0], FitData.PosPath[0], _eT)
Err_DE = returnDataError(SD.DE, SD.eDE, SD.TimeP - SD.TimeP[0], FitData.PosPath[1], _eT)
Err_Vz = returnDataError(SD.VR, SD.eVR, SD.TimeV - SD.TimeP[0], FitData.VPath, _eT)
FitData.initErrorData(Err_RA, Err_DE, Err_Vz) # save individual errors in FitData
# chi^2 value
chi2 = (Err_RA[2] + Err_DE[2] + Err_Vz[2])
return chi2
else:
return 1E10
def returnSpecificChi2Point(SD:DataContainer, ParVec:list, _in:list, kwargs:dict={}) -> float:
OrbEl = getOrbitalElements(ParVec)
NewFitData = getOrbit(SD=SD, Orb=OrbEl, ParamVec=ParVec, index=_in[0], max_iter=10E6, stepsize=kwargs['Stepsize'], kwargs=kwargs)
x = returnCombinedError(SD, NewFitData, _in, kwargs['grav-red'])
return x
def ParVecToParams(_list:list) -> list:
"""
Returns parameter list for use in orbit parsing
Parameters
----------
_list : ParVec
Returns
-------
[Mass, R vec, V vec, Distance]
"""
_M = _list[0]
_r = np.array( [_list[1], _list[2], _list[3]] )
_v = np.array( [_list[4], _list[5], _list[6]] )
_d = _list[7]
return [_M, _r, _v, _d]
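# Hedged example: packing and unpacking the 8-dimensional parameter vector.
def _parvec_example():
    parVec = [4.0e6, 0.0, 4.0e-3, 0.0, -450.0, 160.0, -420.0, 8000.0]
    M, r, v, d = ParVecToParams(parVec)
    return M, r, v, d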
def generateMCData(OldData:np.ndarray, Error:np.ndarray) -> np.ndarray:
"""
Generate a new set of points given the old data and the error bars.
All points are within 1 sigma scaled with error
Parameters
----------
OldData : list
Data points in given Coorinate
Error : list
coresponding errors
Returns
-------
list
new set of datapoints (of same length)
"""
sig = np.random.normal(0,1,len(OldData))
newData = OldData + sig * Error
return newData
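# Hedged example: one Monte Carlo resampling of made-up measurements.
def _mc_resample_example():
    data = np.array([1.0, 2.0, 3.0])
    err = np.array([0.1, 0.1, 0.2])
    return generateMCData(data, err)   # new draw with the same shape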
def ProgressBar(count:int, total:int, status:str=''):
'''
a simple progressbar to keep output clean
'''
barLen = 60
fillLen = int(round(barLen*count/float(total)))
percent = round(100*count/float(total),1)
bar = '='*fillLen + '-'*(barLen-fillLen)
sys.stdout.write('[%s] %s%s (%s) ... %s\r' % (bar, percent, '%', count, status))
sys.stdout.flush()
def NoProgressBar(count:int, status:str=''):
'''
Display clean Message without progressbar
'''
sys.stdout.write('(%s) ... %s\r' % (count, status))
sys.stdout.flush()
def getGravRedshift(M:float, r:np.ndarray) -> np.ndarray:
"""
returns the velocity change due to gravitational redshift
Parameters
----------
M : float
Mass of Black hole
r : np.ndarray
current position of star
Returns
-------
Delta RVel : float
radialvelocity correction
"""
# Schwarzschild radius
rs = 2 * GLOB_G * M / GLOB_c**2
# redshift
z = ( 1 / np.sqrt( 1 - rs/r ) ) - 1
return GLOB_c * z
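# Hedged example: gravitational redshift correction for a made-up separation
# of 6e-4 pc from a 4e6 Msun black hole (of order 100 km/s).
def _grav_redshift_example():
    return getGravRedshift(4.0e6, np.array([6.0e-4]))   # km/s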
# PLOT FUNCTIONS
def plot4Ways(_fig, SD:DataContainer, FD:FitContainer = None, _in:list = [-1,-1], _fName:str = None):
"""
plot 4 diagrams showing position and time dependency of data, plus FitData, if available
Parameters
----------
SD : DataContainer
StarData
FD : FitContainer
FitData, can be None
_fig : Reference to main Figure
_fName : str, optional
save plot as file, by default "frame0001"
showGraph : bool, optional
Show Figure, by default True
_in: [_index_R, _index_V]
if set, draw a point around the Index point
"""
showFit = True
if not FD:
showFit = False
#-------------------------------------------------------------
# CONFIG
StarColor = 'black'
StarErr = 'blue'
_ms=3 # marker size
chi2Color = 'tab:orange'
_fig.clf()
F = []
for i in range(4):
_tf = _fig.add_subplot(2,2,i+1)
_tf.set_axisbelow(True)
_tf.grid(True)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
F.append(_tf)
F[0].set_aspect('equal', 'box')
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
#-------------------------------------------------------------
#x-y
F[0].set_xlabel(r'RA [mas]', {'size':14})
F[0].set_ylabel(r'DE [mas]', {'size':14})
#vR-time
F[1].set_xlabel(r'time - ' + str(SD.TimeP[0]) + ' [yr]', {'size':14})
F[1].set_ylabel(r'RVel [km/s]', {'size':14})
#x-time (RA)
F[2].set_xlabel(r'time - ' + str(SD.TimeP[0]) + ' [yr]', {'size':14})
F[2].set_ylabel(r'RA [mas]', {'size':14})
#y-time (DE)
F[3].set_xlabel(r'time - ' + str(SD.TimeP[0]) + ' [yr]', {'size':14})
F[3].set_ylabel(r'DE [mas]', {'size':14})
#-------------------------------------------------------------
#Real Data
#center
F[0].scatter(0,0,c="red", marker='+', label='center', s=50)
#position x-y
F[0].errorbar(SD.RA, SD.DE, xerr=SD.eRA, yerr=SD.eDE, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) + ' Orbit', ms=_ms)
#vR-time
F[1].errorbar(SD.TimeV - SD.TimeP[0], SD.VR, yerr=SD.eVR, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) +' RVel', ms=_ms, zorder=2)
#x-time
F[2].errorbar(SD.TimeP - SD.TimeP[0], SD.RA, yerr=SD.eRA, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) + ' RA',ms=_ms, zorder=2)
#y-time
F[3].errorbar(SD.TimeP - SD.TimeP[0], SD.DE, yerr=SD.eDE, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) + ' DE',ms=_ms, zorder=2)
#-------------------------------------------------------------
#fake Data
if showFit:
        # This is the length of the fake data: from the index point it extends an
        # integer number of orbits forward, and backwards it extends to 0 (the
        # backward orbit number may be fractional).
        # subtract the first time measurement to get relative times
DataLenFromIndex = SD.TimeP[_in[0]] - SD.TimeP[0] + FD.OrbitNumber * FD.OrbElem.Period
fake_time = np.linspace(0,DataLenFromIndex, len(FD.PosPath[0]))
# end of real data, in relative units
END_Data = SD.TimeP[-1] - SD.TimeP[0]
# update time to only display relevant data
fake_time = [x for x in fake_time if x < END_Data]
#length of relevant data, used for truncating fake data
FI = len(fake_time)
# init points used for chi^2 and remove duplicates
# 0 - time; 1 - values
chi2RA = FD.ErrRA
chi2DE = FD.ErrDE
chi2RVel = FD.ErrVR
SimRA = [x for x in FD.PosPath[0][:FI] if x not in chi2RA[1]]
SimRA_Time = [x for x in fake_time if x not in chi2RA[0]]
SimDE = [x for x in FD.PosPath[1][:FI] if x not in chi2DE[1]]
SimDE_Time = [x for x in fake_time if x not in chi2DE[0]]
SimRV = [x for x in FD.VPath[:FI] if x not in chi2RVel[1]]
SimRV_Time = [x for x in fake_time if x not in chi2RVel[0]]
#-------------------------------------------------------------
# Simulation data points
#position x-y
F[0].plot(FD.PosPath[0], FD.PosPath[1], c='tab:blue', label='Fit')
#vR-time
F[1].plot(fake_time, FD.VPath[:FI], label='sim RVel', zorder=1)
#x-time
F[2].plot(SimRA_Time, SimRA, label='sim RA', zorder=1)
#y-time
F[3].plot(SimDE_Time, SimDE, label='sim DE', zorder=1)
#-------------------------------------------------------------
# simulation points used in chi^2
#F[1].scatter(FD.ErrVR[0], FD.ErrVR[1], label=r'$\chi^2$ points', c=chi2Color, s=_ms, zorder=3) #vR - vz
#F[2].scatter(FD.ErrRA[0], FD.ErrRA[1], label=r'$\chi^2$ points', c=chi2Color, s=_ms, zorder=3) #RA - x
#F[3].scatter(FD.ErrDE[0], FD.ErrDE[1], label=r'$\chi^2$ points', c=chi2Color, s=_ms, zorder=3) #DE - y
#-------------------------------------------------------------
# draw index point
if (_in[0] > 0 and _in[1] > 0):
F[1].scatter(SD.TimeV[_in[1]] - SD.TimeP[0], SD.VR[_in[1]], label=r'Index', s=20, color='red', zorder=99) #vR - vz
F[2].scatter(SD.TimeP[_in[0]] - SD.TimeP[0], SD.RA[_in[0]], label=r'Index', s=20, color='red', zorder=99) #RA - x
F[3].scatter(SD.TimeP[_in[0]] - SD.TimeP[0], SD.DE[_in[0]], label=r'Index', s=20, color='red', zorder=99) #DE - y
#-------------------------------------------------------------
# Print Orbit Elements left of screen
OrbElem = FD.OrbElem
RoundTo = 3
# Mass
plt.figtext(0.01,0.7, r"M [$10^6 M_\odot$] =" + str( np.round(FD.Mass/1E6, RoundTo) ) )
print("M = ", FD.Mass/1E6)
# Distance
plt.figtext(0.01,0.65, "R [kpc] =" + str( np.round(FD.Distance/1E3, RoundTo) ) )
print("D = ", FD.Distance/1E3)
# Period
plt.figtext(0.01,0.6, "T [yr] =" + str( np.round(OrbElem.Period, RoundTo) ) )
print("T = ", OrbElem.Period)
# Eccentricity
plt.figtext(0.01,0.55, "e [1] =" + str( np.round(OrbElem.Ecc, RoundTo) ) )
print("e = ", OrbElem.Ecc)
    # Semi-Major Axis
plt.figtext(0.01,0.45, "a [pc] =" + str( np.round(OrbElem.MayAxis, RoundTo) ) )
print("a = ", OrbElem.MayAxis)
# Inclination
plt.figtext(0.01,0.4, r"i [$^\circ$] =" + str( np.round(OrbElem.Incl, RoundTo) ) )
print("i = ", OrbElem.Incl)
# Longitude of ascending node
plt.figtext(0.01,0.35, r"$\Omega$ [$^\circ$] =" + str( np.round(OrbElem.LAN, RoundTo) ) )
print("LAN = ", OrbElem.LAN)
# argument of periapsis
plt.figtext(0.01,0.3, r"$\omega$ [$^\circ$] =" + str( np.round(OrbElem.ArgPeri, RoundTo) ) )
print("omega = ", OrbElem.ArgPeri)
for i in range(len(F)):
F[i].legend(loc='best', fontsize=12)
if _fName:
plt.savefig("SMBH/Data/dump/" + _fName)
def plot2Ways(_fig, SD:DataContainer, FD:FitContainer = None, _in:list = [-1,-1], _fName:str = None):
"""
plot 2 diagrams showing position and Radial Velocity over Time
Parameters
----------
SD : DataContainer
StarData
FD : FitContainer
FitData, can be None
_fig : Reference to main Figure
_fName : str, optional
save plot as file, by default "frame0001"
showGraph : bool, optional
Show Figure, by default True
_in: [_index_R, _index_V]
if set, draw a point around the Index point
"""
showFit = True
if not FD:
showFit = False
#-------------------------------------------------------------
# CONFIG
StarColor = 'black'
StarErr = 'gray'
_ms=3 # marker size
chi2Color = 'tab:orange'
_fig.clf()
F = []
for i in range(2):
_tf = _fig.add_subplot(1,2,i+1)
_tf.set_axisbelow(True)
_tf.grid(True)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
F.append(_tf)
F[0].set_aspect('equal', 'box')
#-------------------------------------------------------------
#x-y
F[0].set_xlabel(r'RA [mas]', {'size':14})
F[0].set_ylabel(r'DE [mas]', {'size':14})
#vR-time
F[1].set_xlabel(r'time - ' + str(SD.TimeP[0]) + ' [yr]', {'size':14})
F[1].set_ylabel(r'RVel [km/s]', {'size':14})
#-------------------------------------------------------------
#Real Data
#center
F[0].scatter(0,0,c="red", marker='+', label='center', s=50)
#position x-y
F[0].errorbar(SD.RA, SD.DE, xerr=SD.eRA, yerr=SD.eDE, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) + ' Orbit', ms=_ms)
#vR-time
F[1].errorbar(SD.TimeV - SD.TimeP[0], SD.VR, yerr=SD.eVR, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) +' RVel', ms=_ms, zorder=2)
#-------------------------------------------------------------
#fake Data
if showFit:
        # This is the length of the fake data: from the index point it extends an
        # integer number of orbits forward, and backwards it extends to 0 (the
        # backward orbit number may be fractional).
        # subtract the first time measurement to get relative times
DataLenFromIndex = SD.TimeP[_in[0]] - SD.TimeP[0] + FD.OrbitNumber * FD.OrbElem.Period
fake_time = np.linspace(0,DataLenFromIndex, len(FD.PosPath[0]))
# end of real data, in relative units
END_Data = SD.TimeP[-1] - SD.TimeP[0]
# update time to only display relevant data
fake_time = [x for x in fake_time if x < END_Data]
#length of relevant data, used for truncating fake data
FI = len(fake_time)
# init points used for chi^2 and remove duplicates
# 0 - time; 1 - values
chi2RVel = FD.ErrVR
SimRV = [x for x in FD.VPath[:FI] if x not in chi2RVel[1]]
SimRV_Time = [x for x in fake_time if x not in chi2RVel[0]]
#-------------------------------------------------------------
# Simulation data points
#position x-y
F[0].plot(FD.PosPath[0], FD.PosPath[1], c='tab:blue', label='Fit')
#vR-time
F[1].plot(fake_time, FD.VPath[:FI], label='sim RVel', zorder=1)
#-------------------------------------------------------------
# simulation points used in chi^2
#F[1].scatter(FD.ErrVR[0], FD.ErrVR[1], label=r'$\chi^2$ points', c=chi2Color, s=_ms, zorder=3) #vR - vz
#-------------------------------------------------------------
# draw index point
if (_in[0] > 0 and _in[1] > 0):
F[1].scatter(SD.TimeV[_in[1]] - SD.TimeP[0], SD.VR[_in[1]], label=r'Index', s=20, color='red', zorder=99) #vR - vz
#-------------------------------------------------------------
# Print Orbit Elements left of screen
OrbElem = FD.OrbElem
RoundTo = 3
# Mass
plt.figtext(0.01,0.7, r"M [$10^6 M_\odot$] =" + str( np.round(FD.Mass/1E6, RoundTo) ), {'size':16} )
print("M = ", FD.Mass/1E6)
# Distance
plt.figtext(0.01,0.65, "R [kpc] =" + str( np.round(FD.Distance/1E3, RoundTo) ), {'size':16} )
print("D = ", FD.Distance/1E3)
# Period
plt.figtext(0.01,0.6, "T [yr] =" + str( np.round(OrbElem.Period, RoundTo) ), {'size':16} )
print("T = ", OrbElem.Period)
# Eccentricity
plt.figtext(0.01,0.55, "e [1] =" + str( np.round(OrbElem.Ecc, RoundTo) ), {'size':16} )
print("e = ", OrbElem.Ecc)
    # Semi-Major Axis
plt.figtext(0.01,0.45, "a [pc] =" + str( np.round(OrbElem.MayAxis, RoundTo) ), {'size':16} )
print("a = ", OrbElem.MayAxis)
# Inclination
plt.figtext(0.01,0.4, r"i [$^\circ$] =" + str( np.round(OrbElem.Incl, RoundTo) ), {'size':16} )
print("i = ", OrbElem.Incl)
# Longitude of ascending node
plt.figtext(0.01,0.35, r"$\Omega$ [$^\circ$] =" + str( np.round(OrbElem.LAN, RoundTo) ), {'size':16} )
print("LAN = ", OrbElem.LAN)
# argument of periapsis
plt.figtext(0.01,0.3, r"$\omega$ [$^\circ$] =" + str( np.round(OrbElem.ArgPeri, RoundTo) ), {'size':16} )
print("omega = ", OrbElem.ArgPeri)
for i in range(len(F)):
F[i].legend(loc='best', fontsize=12)
if _fName:
plt.savefig("SMBH/Data/dump/" + _fName)
def plotDataAndFit(_fig, SD:DataContainer, FD:FitContainer, _fName:str = None):
'''
plots only the Positions of the Star with the Fit
'''
#-------------------------------------------------------------
# CONFIG
StarColor = 'black'
StarErr = 'gray'
_ms=3 # marker size
_fig.clf()
_tf = _fig.add_subplot(1,1,1)
_tf.set_aspect('equal', 'box')
_tf.set_axisbelow(True)
_tf.grid(True)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
_tf.set_xlabel(r'RA [mas]', {'size':14})
_tf.set_ylabel(r'DE [mas]', {'size':14})
#-------------------------------------------------------------
# center
_tf.scatter(0,0,c="red", marker='+', label='center', s=50)
# actual data
_tf.errorbar(SD.RA, SD.DE, xerr=SD.eRA, yerr=SD.eDE, fmt='o', ecolor=StarErr, color=StarColor, label='S'+str(SD.StarNr) + ' Orbit', ms=_ms)
# fake data
_tf.plot(FD.PosPath[0], FD.PosPath[1], c='tab:blue', label='Fit')
#-------------------------------------------------------------
OrbElem = FD.OrbElem
RoundTo = 3
# Mass
plt.figtext(0.01,0.7, r"M [$10^6 M_\odot$] =" + str( np.round(FD.Mass/1E6, RoundTo) ), {'size':16})
print("M = ", FD.Mass/1E6)
# Distance
plt.figtext(0.01,0.65, "D [kpc] =" + str( np.round(FD.Distance/1E3, RoundTo) ), {'size':16} )
print("R = ", FD.Distance/1E3)
# Period
plt.figtext(0.01,0.6, "T [yr] =" + str( np.round(OrbElem.Period, RoundTo) ), {'size':16} )
print("T = ", OrbElem.Period)
# Eccentricity
plt.figtext(0.01,0.55, "e [1] =" + str( np.round(OrbElem.Ecc, RoundTo) ), {'size':16} )
print("e = ", OrbElem.Ecc)
    # Semi-Major Axis
plt.figtext(0.01,0.45, "a [pc] =" + str( np.round(OrbElem.MayAxis, RoundTo) ), {'size':16} )
print("a = ", OrbElem.MayAxis)
# Inclination
plt.figtext(0.01,0.4, r"i [$^\circ$] =" + str( np.round(OrbElem.Incl, RoundTo) ), {'size':16} )
print("i = ", OrbElem.Incl)
# Longitude of ascending node
plt.figtext(0.01,0.35, r"$\Omega$ [$^\circ$] =" + str( np.round(OrbElem.LAN, RoundTo) ), {'size':16} )
print("LAN = ", OrbElem.LAN)
# argument of periapsis
plt.figtext(0.01,0.3, r"$\omega$ [$^\circ$] =" + str( np.round(OrbElem.ArgPeri, RoundTo) ), {'size':16} )
print("omega = ", OrbElem.ArgPeri)
_tf.legend(loc='best', fontsize=12)
if _fName:
plt.savefig("SMBH/Data/dump/" + _fName)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
# ROUTINE
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
def getOrbit(SD:DataContainer, Orb:OrbitElem, ParamVec:list, index:int, max_iter:float, stepsize:float, kwargs:dict={}) -> FitContainer:
"""
Calculates the orbit for given parameters
Parameters
----------
SD : DataContainer
Star Data imported from file
Orb : OrbitElem
Orbital Elements for initial state
potential : function
function that returns the strength of the potential. must take r,v,M as params in order
ParamVec : list
Parameter Vector (8 dim)
index : int
Index point from which the Algorithm starts to integrate in both directions
max_iter : float
Max Integration steps in either direction from index
stepsize : float
delta t
Returns
-------
FitContainer
Resulting Fit Data
"""
# convert parameter vector to parameters
_M, _r, _v, _d = ParVecToParams(ParamVec)
# switch the Integrator method
if 'method' in kwargs.keys():
if kwargs['method'] == 'Schwarz':
IntegratorToUse = VerletStepSchwarz
PotFunc = potentialSchwarz
else:
IntegratorToUse = VerletStep
PotFunc = potential
else:
IntegratorToUse = VerletStep
PotFunc = potential
# try different positions of sgr a*
if 'varSGRA' in kwargs.keys():
# convert to pc using the current distance while fitting
SGRA_Pos = PosRadToReal(kwargs['varSGRA'], _d)
else:
# default to (0,0,0)
SGRA_Pos = np.array([0,0,0])
#------------------------------------------------------------------------
# CURRENT VALUES
PeriodFront = SD.TimeP[-1] - SD.TimeP[index] # Time to render from index point to end
PeriodBack = SD.TimeP[index] - SD.TimeP[0] # Time to render from beginning to index point
r_cur = _r # Position
v_cur = _v # Velocity
f_cur = PotFunc(_r,_v,_M, SGRA_Pos) # init the potential
init_Er = _r / np.linalg.norm(_r) # Unit vector of initial position
#------------------------------------------------------------------------
# TRANSIENT DATA
cur_timer = 0 # current iteration step in orbit integration
last_err = 1 # unit vector of last position
cutoff_timer = max_iter # max iteration step in orbit integration
PastOneMinus = False # checking if integration is over the point where dot(now, init) = -1
stopDotProd = False # once true, the orbit end point will be calculated and no dot() will be executed
OrbitPeriod = Orb.Period # Period of this orbit. used to determine total length of fake data
OneOrbitSteps = 0 # used in backward rendering. represents how many steps one orbit is
if (OrbitPeriod > 0):
QuotientFront = np.ceil(PeriodFront / OrbitPeriod) # number of orbits to compute in front direction
QuotientBack = PeriodBack / OrbitPeriod # number of orbits to compute in back direction, CAN BE FLOAT since steps for one orbit have been determined
# if Period is not defined, return high error
else:
FD_Fail = FitContainer(ParamVec, success=False, _orb=Orb)
return FD_Fail
if DEBUG_MODE:
print("orbit time: ", OrbitPeriod)
print("quotient front: ", QuotientFront)
print("quotient back: ", QuotientBack)
# Overwrite integration length if needed
if 'front' in kwargs.keys():
QuotientFront = kwargs['front']
if 'back' in kwargs.keys():
QuotientBack = kwargs['back']
#save steps for plot and error handling. start with init point
PosTracker = [_r]
PosTrackerBack = []
VelTracker = [_v]
VelTrackerBack = []
if 'useFile' in kwargs.keys():
useExtFile = kwargs['useFile']
else:
useExtFile = False
if useExtFile:
_OFile = open(OrbitFileForewrd, "w")
#------------------------------------------------------------------------
# ORBIT INTEGRATION
# Integrate from Index Point to End point, forward time
while cur_timer < cutoff_timer:
cur_timer += 1
#get the next infinitesimal step in position and velocity
_dat = IntegratorToUse(stepsize, r_cur, v_cur, f_cur, _M, r_SGRA=SGRA_Pos)
#new position in real space
r_cur = _dat[0]
#new velocity
v_cur = _dat[1]
#new potential for next step
f_cur = _dat[2]
#position in radial space
if useExtFile:
_OFile.write(str(r_cur[0]) + " " + str(r_cur[1]) + " " + str(r_cur[2]) + " " + str(v_cur[0]) + " " + str(v_cur[1]) + " " + str(v_cur[2]) + "\n")
else:
PosTracker.append(r_cur)
VelTracker.append(v_cur)
# determine end of orbit integration
if (not stopDotProd):
temp = np.dot( init_Er, r_cur/np.linalg.norm(r_cur) )
if (not PastOneMinus):
if (temp - last_err < 0 ):
last_err = temp
else:
PastOneMinus = True
last_err = temp
# orbit is past the dot() = -1 point => dot() increases again
else:
# dot() is still increasing
if (temp > last_err):
last_err = temp
# dot() has decreased or is equal to prev step => one orbit complete. calculate cutoff timer for end of Measurement data
else:
# calculate multiple orbits based on data end
cutoff_timer = cur_timer * QuotientFront
# steps for one orbit
OneOrbitSteps = cur_timer
stopDotProd = True
if DEBUG_MODE:
print("forward cutoff = ", cutoff_timer)
# reset some data
cur_timer = 0
r_cur = _r
v_cur = _v
f_cur = PotFunc(_r,_v,_M)
if useExtFile:
_OFile.close()
_OFile = open(OrbitFileBackwrd, "w")
# Integrate multiple orbits backwards depending on data beginning
while cur_timer < np.ceil(OneOrbitSteps * QuotientBack) :
cur_timer += 1
# reverse time
_dat = IntegratorToUse(-stepsize, r_cur, v_cur, f_cur, _M, r_SGRA=SGRA_Pos)
#new position in real space
r_cur = _dat[0]
#new velocity
v_cur = _dat[1]
#new potential for next step
f_cur = _dat[2]
if useExtFile:
_OFile.write(str(r_cur[0]) + " " + str(r_cur[1]) + " " + str(r_cur[2]) + " " + str(v_cur[0]) + " " + str(v_cur[1]) + " " + str(v_cur[2]) + "\n")
else:
PosTrackerBack.append(r_cur)
VelTrackerBack.append(v_cur)
if useExtFile:
_OFile.close()
if DEBUG_MODE:
print("backward cutoff = ", cur_timer)
#------------------------------------------------------------------------
# CONCAT DATA
if not useExtFile:
# reverse backward time points
PosTrackerBack.reverse()
VelTrackerBack.reverse()
# append data
PosTracker = np.array(PosTrackerBack + PosTracker)
VelTracker = np.array(VelTrackerBack + VelTracker)
#------------------------------------------------------------------------
# RETURN FIT DATA
FD = FitContainer(ParamVec, _oN = QuotientFront, _orb=Orb, _PosArr=PosTracker, _VelArr = VelTracker)
return FD
def FitDataInner(SD:DataContainer, kwargs:dict={}) -> Tuple[FitContainer, list]:
"""
Inner Routine for fitting the orbit of a specified star.
Only needs the Star Data Object to work.
Can plot data in multiple ways
Parameters
----------
SD : DataContainer
Star Data Object
Returns
-------
[FitData Container, [index_R, index_V] ]
"""
#------------------------------------------------------------------------
# CONSTRAINTS
#Mass constraints in solar mass
CSRT_M_min = 1E5
CSRT_M_max = 1E7
#Distance in pc
CSRT_R_min = 7000
CSRT_R_max = 9000
#------------------------------------------------------------------------
# INIT VALUES
# First Mass estimate -- random val in range
Mass_0 = 4E6 #np.random.uniform(CSRT_M_min, CSRT_M_max)
if DEBUG_MODE:
print("Init Mass [10^6 Msun] = ", Mass_0/1E6)
#Fist Distance Reference Point
Distance_0 = 8000 #np.random.uniform(CSRT_R_min, CSRT_R_max)
if DEBUG_MODE:
print("Init Distance [pc] = ", Distance_0)
# index for beginning point of data, IMPORTANT
_index_R = 61 # 2007.55
_index_V = 24 # 2007.55
if DEBUG_MODE:
print("Index Point Pos: ", SD.TimeP[_index_R], SD.TimeP[_index_R-1])
print("Index Point RV : ", SD.TimeV[_index_V])
# Position Vector, use random data point for start, convert to pc
r0 = PosRadToReal( np.array(
[
SD.RA[_index_R],
SD.DE[_index_R],
0
] ), Distance_0 )
if DEBUG_MODE:
print("Init Position [pc] = ", r0)
#Velocity Vector, use same random point + point prior to calculate first estimate of v, in km/s
v0 = np.array(
[
(SD.RA[_index_R]-SD.RA[_index_R-1])*Distance_0*GLOB_PcYrKmS*GLOB_masToRad / (SD.TimeP[_index_R]-SD.TimeP[_index_R-1]),
(SD.DE[_index_R]-SD.DE[_index_R-1])*Distance_0*GLOB_PcYrKmS*GLOB_masToRad / (SD.TimeP[_index_R]-SD.TimeP[_index_R-1]),
SD.VR[_index_V]
])
if DEBUG_MODE:
print("Init Velocity [km/s] = ", v0)
stepsize = 1E-8 # orbit integration delta t
# Algorithm breaks when max_iteration is smaller than the needed steps to complete the simulation
    max_iteration = 1E6  # orbit integration max steps; max for 1E-10 is approx. 600,000
# stepsize overwrite
if 'Stepsize' in kwargs.keys():
stepsize = kwargs['Stepsize']
global GLOB_counter
GLOB_counter = 0
if 'grav-red' in kwargs.keys():
useGravRedCorr = kwargs['grav-red']
else:
useGravRedCorr = False
if 'Pbar' in kwargs.keys():
usePbar = kwargs['Pbar']
else:
usePbar = True
#------------------------------------------------------------------------
# Parameter Vector
parVec = np.array([ Mass_0, r0[0], r0[1], r0[2], v0[0], v0[1], v0[2], Distance_0 ])
# [ (min, max) ]
BOUNDS = [
(1E6, 7E6), # in msun, M
(RadToReal( SD.RA[_index_R] - 15, 8000 ), RadToReal( SD.RA[_index_R] + 15, 8000 )), # in pc, x
(RadToReal( SD.DE[_index_R] - 15, 8000 ), RadToReal( SD.DE[_index_R] + 15, 8000 )), # in pc, y
(-0.2, 0.2), # in pc, z
(v0[0] - 1500,v0[0] + 1500), # in km/s, vx
(v0[1] - 1500,v0[1] + 1500), # in km/s, vy
(v0[2] - 500,v0[2] + 500), # in km/s, vz
(7800, 8800) # in pc, d
]
#------------------------------------------------------------------------
# MAIN LOOP
# function to be minimized
def _tFitFunction(_parVec, *args):
OrbEl = getOrbitalElements(_parVec)
if usePbar:
global GLOB_counter
GLOB_counter += 1
        # reject unphysical orbits: ecc >= 1, or a period outside (12, 20] yr
if (OrbEl.Period > 12 and OrbEl.Ecc < 1 and OrbEl.Period <= 20):
_FD = getOrbit(SD=SD, Orb=OrbEl, ParamVec=_parVec, index=_index_R, max_iter=max_iteration, stepsize=stepsize, kwargs=kwargs)
x = returnCombinedError(SD, _FD, [_index_R, _index_V], redshiftCorr=useGravRedCorr)
if usePbar:
if args[0]:
ProgressBar(GLOB_counter, 8000, "chi2= " + str(x))
else:
NoProgressBar(GLOB_counter, "chi2= " + str(x))
return x
# dont calculate orbit, return constant error
else:
if usePbar:
if args[0]:
ProgressBar(GLOB_counter, 8000, "chi2= 1E10")
else:
NoProgressBar(GLOB_counter, "chi2= 1E10")
return 1E10
# Kepler solution, global, 1E-8 -- chosen if no parameters are set
#parVec = np.array([ 4.26060187e+06, -2.98146568e-04, 7.21594511e-03, -5.38160820e-03, -4.51226416e+02, 1.62323029e+02, -4.23509314e+02, 8.38337634e+03])
    # use first guess parameter vector; .get avoids a KeyError when no method is set
    method = kwargs.get('method', 'None')
    if method == 'None':
        pass
    # use random parameter vector
    if method == 'Random':
Mass_0 = np.random.uniform(CSRT_M_min, CSRT_M_max)
Distance_0 = np.random.uniform(CSRT_R_min, CSRT_R_max)
r0 = PosRadToReal( np.array( [ SD.RA[_index_R], SD.DE[_index_R], 0 ] ), Distance_0 )
v0 = np.array(
[ (SD.RA[_index_R]-SD.RA[_index_R-1])*Distance_0*GLOB_PcYrKmS*GLOB_masToRad / (SD.TimeP[_index_R]-SD.TimeP[_index_R-1]),
(SD.DE[_index_R]-SD.DE[_index_R-1])*Distance_0*GLOB_PcYrKmS*GLOB_masToRad / (SD.TimeP[_index_R]-SD.TimeP[_index_R-1]),
SD.VR[_index_V] ] )
parVec = np.array([ Mass_0, *r0, *v0, Distance_0 ])
# Kepler solution, 1E-8
    if stepsize == 1E-8 and method == 'Newton':
parVec = np.array([ 4.26175122e+06, -2.95254493e-04, 7.21524671e-03, -5.38486006e-03, -4.51353063e+02, 1.63648795e+02, -4.21806411e+02, 8.38370406e+03])
    # Kepler solution, 1E-9
    if stepsize == 1E-9 and method == 'Newton':
parVec = np.array( [4.26180655e+06, -2.96184228e-04, 7.19338917e-03, -5.38020387e-03, -4.51561607e+02, 1.62968775e+02, -4.24584527e+02, 8.35915945e+03] )
# Schwarzschild solution, with grav red, 1E-9
    if stepsize == 1E-9 and method == 'Schwarz':
parVec = np.array([ 4.42965827e+06, -3.01141313e-04, 7.29306129e-03, -5.48739705e-03, -4.55624852e+02, 1.64210719e+02, -4.28133758e+02, 8.47566930e+03])
#------------------------------------------------------------------------
# Fit Options
shouldFit = 0
if 'Fit' in kwargs.keys():
if kwargs['Fit'] == 'Local':
shouldFit = 1
if kwargs['Fit'] == 'Full':
shouldFit = 2
t0 = time.process_time()
# local fit only
if shouldFit == 1:
if usePbar:
print("Start Local Fit\n")
RESULT = optimize.minimize(_tFitFunction, x0=parVec, args=(True,), method='Powell', options={'disp':False})
if usePbar:
print("\n")
print("[%s] - Message: %s\nResult: %s\ncurrent function val: %s" % (RESULT.success, RESULT.message, RESULT.x, RESULT.fun))
t0 = time.process_time() - t0
print("Done in %ss, nit = %s, delT = %ss" % (round(t0, 3), RESULT.nfev, round(t0/RESULT.nfev,3)))
parVec = RESULT.x.tolist()
# Global Fit
if shouldFit == 2:
if usePbar:
print("Start Global Fit\n")
RESULT = optimize.dual_annealing(_tFitFunction, args=(False,), bounds=BOUNDS, initial_temp=5230, maxfun=1E5, maxiter=250, local_search_options={"method": "Powell"})
if usePbar:
print("\n")
print(RESULT)
parVec = RESULT.x.tolist()
# reset counter
GLOB_counter = 0
RESULT = optimize.minimize(_tFitFunction, x0=parVec, args=(True,), method='Powell', options={'disp':False})
if usePbar:
print("\n")
print("[%s] - Message: %s\nResult: %s\ncurrent function val: %s" % (RESULT.success, RESULT.message, RESULT.x, RESULT.fun))
t0 = time.process_time() - t0
print("Done in %ss, nit = %s, delT = %ss" % (round(t0, 3), RESULT.nfev, round(t0/RESULT.nfev,3)))
parVec = RESULT.x.tolist()
#------------------------------------------------------------------------
# Return
OrbEl = getOrbitalElements(parVec)
NewFitData = getOrbit(SD=SD, Orb=OrbEl, ParamVec=parVec, index=_index_R, max_iter=max_iteration, stepsize=stepsize, kwargs=kwargs)
_Err = returnCombinedError(SD, NewFitData, [_index_R, _index_V], redshiftCorr=useGravRedCorr)
return NewFitData, [_index_R, _index_V]
def FitDataStandalone(_starNr:int, kwargs:dict={}) -> Tuple[FitContainer, DataContainer, list]:
"""
    Standalone routine for fitting the orbit of a specified star.
    Can display plots; intended for fitting and plotting from scratch.
Parameters
----------
    _starNr : [int] Number of the star to be fitted
    kwargs : [dict] Options, see below
Options
-------
    method : None, Random, Newton, Schwarz -> start vector / potential to use
grav-red : bool -> use Gravitational Redshift correction
Fit : None, Local, Full -> Fit Options
Stepsize : float -> overwrite default stepsize of 1E-8
"""
#read complete data from file
Data = readTable(fileName)
#return Data for Star S-NUMBER
S_Data = return_StarExistingData(Data, _starNr)
#Star Data Container
SD = DataContainer(_starNr, S_Data)
FD, selIndex = FitDataInner(SD, kwargs=kwargs)
# when using gravitational redshift correction, update star data radial velocity data
if 'grav-red' in kwargs.keys() and 'Fit' in kwargs.keys():
if kwargs['grav-red']: # and (kwargs['Fit'] == 'Local' or kwargs['Fit'] == 'Full'):
_eT = SD.TimeP[selIndex[0]] - SD.TimeP[0] + FD.OrbitNumber * FD.OrbElem.Period
fakeTimeline = np.linspace(0,_eT, len(FD.VPath))
j = 0
rTime = SD.TimeV - SD.TimeP[0]
LengthAtVR = np.empty(len(rTime))
for i in range(len(SD.TimeV)):
for k in range(j, len(fakeTimeline)):
if (fakeTimeline[k] >= (rTime)[i]):
#newVR_Timeline[i] = fakeTimeline[k]
LengthAtVR[i] = np.linalg.norm(FD.PositionArray[k])# FD.PositionArray[k][2] #
j = k
break
PN_VR = SD.VR - getGravRedshift(FD.Mass, LengthAtVR)
SD.VR = PN_VR
return FD, SD, selIndex
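# (hedged) Example of driving the standalone fit; the star number and option
# values below are illustrative only, see the FitDataStandalone docstring:
#
#   FD, SD, idx = FitDataStandalone(2, kwargs={
#       'method': 'Schwarz',   # potential / start vector to use
#       'grav-red': True,      # gravitational redshift correction
#       'Fit': 'Local',        # 'None', 'Local' or 'Full'
#       'Stepsize': 1E-9})     # overwrite the default integrator delta t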
def genMCD(SD:DataContainer, iter:int, kwargs:dict={}):
"""
    Calculates new parameters after varying the star data and writes them to file
Parameters
----------
SD : DataContainer
Star Data in question
iter : int
Total Fits to compute and use for Errorbar calculation
"""
FileToWriteTo = outputFile
if 'File' in kwargs.keys():
FileToWriteTo = kwargs['File']
if 'UseSGRA_Pos' in kwargs.keys():
useSGRA_Pos = kwargs['UseSGRA_Pos']
else:
useSGRA_Pos = False
#------------------------------------------------------------------------
# MAIN LOOP
# main loop, generating new data and fitting to get a list for every parameter
for curIt in range(iter):
# generate new points within 1 sigma of error
newRA = generateMCData(SD.RA, SD.eRA)
newDE = generateMCData(SD.DE, SD.eDE)
newVR = generateMCData(SD.VR, SD.eVR)
        if DEBUG_MODE:
            print(SD.RA)
            print(newRA)
            print(SD.DE)
            print(newDE)
        # create copy of Star Data and overwrite the points
NewSD = SD.copy()
NewSD.RA = newRA
NewSD.DE = newDE
NewSD.VR = newVR
# generate new position of sgr a* if needed, use global bounds
if useSGRA_Pos:
#local best fit
NewSGRA_Pos = np.random.normal(0,1,3) * GLOB_SGRA_Pos
else:
NewSGRA_Pos = np.array([0,0,0])
# change the position of sgr a*
kwargs['varSGRA'] = NewSGRA_Pos
if DEBUG_MODE:
print("New Position of SGR A*: ", NewSGRA_Pos)
# Fit the new Star Data
print("\n")
print('-'*25 + "Starting new Fit (%s/%s)" % (curIt+1, iter))
newFD, _ = FitDataInner(NewSD, kwargs=kwargs)
_tParVec = newFD.returnParVec()
# write data to file
        with open(FileToWriteTo, "a") as f:
            for j in range(len(_tParVec)):
                f.write(str(_tParVec[j]) + " ")
            f.write("\n")
print("\nDone!\n")
def genMCD_MP(SD:DataContainer, pid:int, kwargs:dict={}):
"""
    Calculates new parameters after varying the star data and writes them to
    file. Multiprocessing variant: computes a single fit per call.
    Parameters
    ----------
    SD : DataContainer
        Star Data in question
    pid : int
        Process id, used to label the progress output
"""
FileToWriteTo = outputFile
if 'File' in kwargs.keys():
FileToWriteTo = kwargs['File']
if 'UseSGRA_Pos' in kwargs.keys():
useSGRA_Pos = kwargs['UseSGRA_Pos']
else:
useSGRA_Pos = False
#------------------------------------------------------------------------
# MAIN LOOP
# generate new points within 1 sigma of error
newRA = generateMCData(SD.RA, SD.eRA)
newDE = generateMCData(SD.DE, SD.eDE)
newVR = generateMCData(SD.VR, SD.eVR)
    # create copy of Star Data and overwrite the points
NewSD = SD.copy()
NewSD.RA = newRA
NewSD.DE = newDE
NewSD.VR = newVR
if useSGRA_Pos:
NewSGRA_Pos = np.random.normal(0, 1, 3) * GLOB_SGRA_Pos
#NewSGRA_Pos = PosRadToReal(NewSGRA_Pos, _tDist)
else:
NewSGRA_Pos = np.array([0,0,0])
# still in mas
kwargs['varSGRA'] = NewSGRA_Pos
# Fit the new Star Data
print('-'*25 + "Starting new Fit (%s)" % (pid))
newFD, _ = FitDataInner(NewSD, kwargs=kwargs)
_tParVec = newFD.returnParVec()
# write data to file
    with open(FileToWriteTo, "a") as f:
        for j in range(len(_tParVec)):
            f.write(str(_tParVec[j]) + " ")
        f.write("\n")
print("%s, Done!" % (pid))
def evaMCD(_fig, file:str):
"""
Evaluates Parameters written to file and calculates mean and std values for every parameter and prints them out
"""
print(file)
f = open(file, 'r')
lines = f.readlines()
h= []
for i in range(len(lines)):
_t = lines[i].strip()
_t = _t.split(" ")
_t = [float(x) for x in _t]
h.append(_t)
mean = []
std = []
g = []
histData = []
histName = [r'$M$ [$10^6 M_\odot$]', r'$R$ [kpc]', r'$e$ [1]', r'$a$ [$10^{-3}$pc]', r'$i$ [$^\circ$]', r'$T$ [yr]']
kartPos = []
N = ["Mass", "R", "e", "a", "i", "LAN", "argPeri", "MeanM", "T", "True Anomaly"]
print("-"*75)
for i in range(len(h)):
OE = getOrbitalElements(h[i])
# Mass, Distance, e, a, i, Omega, omega, M, T, True Anomaly
g.append( [ h[i][0], h[i][7], OE.Ecc, OE.MayAxis, OE.Incl, OE.LAN, OE.ArgPeri, OE.MeanM, OE.Period, OE.TAnom ] )
histData.append([ h[i][0]/1E6, h[i][7]/1E3, OE.Ecc, OE.MayAxis, OE.Incl, OE.Period ])
# position
kartPos.append( [ h[i][1], h[i][2], h[i][3], h[i][4], h[i][5], h[i][6] ] )
for j in range(len(g[0])):
ParDat = [g[i][j] for i in range(len(g))]
mean.append( np.mean( ParDat ) )
std.append( np.std( ParDat ) )
print("MCD: ", N[j], ", mean= ", mean[j], "; std= ", std[j])
print("Length of data: ", len(g))
'''
mean = []
std = []
N = ["x", "y", "z", "vx", "vy", "vz"]
for j in range(len(kartPos[0])):
ParDat = [kartPos[i][j] for i in range(len(kartPos))]
mean.append( np.mean( ParDat ) )
std.append( np.std( ParDat ) )
print("MCD: ", N[j], ", mean= ", mean[j], "; std= ", std[j])
'''
print("-"*75)
_fig.clf()
mean = []
std = []
for j in range(len(histData[0])):
ParDat = [histData[i][j] for i in range(len(histData))]
mean.append( np.mean( ParDat ) )
std.append( np.std( ParDat ) )
mean[3] *= 1E3
std[3] *= 1E3
for l in range(len(histData)):
histData[l][3] *= 1E3
for x in range(6):
for y in range(6):
if y <= x:
_tf = _fig.add_subplot(6,6, 6*x + y + 1)
_tf.grid(False)
_tf.set_aspect('auto')
#_tf.set_xlabel(histName[y])
#_tf.set_ylabel(histName[x])
if y != 0 or x == 0:
plt.yticks([])
else:
pass
plt.yticks( [4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11],
[4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11], size=8)
plt.yticks(size=8)
if x != 5 or y == 5:
plt.xticks([])
else:
pass
plt.xticks( [4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11],
[4.23,4.275,8.35,8.40,0.881,0.884,4.993,5.013,44.3,44.8,16.03,16.11], rotation=90, size=8)
#plt.xticks(rotation=90, size=8)
if y == x:
_t = [histData[i][x] for i in range(len(histData))]
plt.xlim(mean[x]-3*std[x],mean[x]+3*std[x])
_tf.hist(_t, bins=50)
plt.axvline(mean[x], color='black', linestyle='dashed')
plt.figtext(0.165 + 0.8/6 * x, 0.91 - 0.8/6 * x, round(mean[x], 3), ha='center', size=11 )
else:
_x = [histData[i][y] for i in range(len(histData))]
_y = [histData[i][x] for i in range(len(histData))]
_t = _tf.hist2d(_x, _y, bins=(20,20), cmap=cm.jet)
#_fig.tight_layout()
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, wspace=0, hspace=0)
for i in range(len(histName)):
# horizontal at bottom
plt.figtext(0.15 + 0.825/6 * i, 0.02, histName[i], {'ha':'center', 'size':9})
# vertical at left
plt.figtext(0.01, 0.15 + 0.825/6 * (5-i), histName[i], {'ha':'left', 'size':9})
def DrawChi2Slice(_fig, SD:DataContainer, parVec:list, bounds:list, IndexList:list, _dim:int=50, kwargs:dict={}):
"""
Draws chi2 distribution for M and R
Parameters
----------
SD : DataContainer
Star Data
parVec : list
        Current parameter vector, preferably from the minimum
    bounds : list
        bounds for both variables (M and R)
    IndexList : list
        indices of the first position and radial velocity data points
"""
dim = _dim
chi2 = np.zeros((dim, dim))
Mvar = np.linspace(bounds[0][0], bounds[0][1], dim)
Rvar = np.linspace(bounds[1][0], bounds[1][1], dim)
iter = 0
_chi2File = open("SMBH/Data/dump/chi2Slice.txt", 'r')
lines = _chi2File.readlines()
h= []
for i in range(len(lines)):
_t = lines[i].strip()
_t = _t.split(" ")
_t = [float(x) for x in _t]
h.append(_t)
chi2 = np.array(h)
miny_i = []
for i in range(100):
_l = np.amin(chi2[i])
_m = np.argwhere(chi2[i] == _l).flatten()
miny_i.append(Mvar[_m])
minx_i = []
for j in range(100):
_m = np.amin(chi2[:,j])
_n = np.argwhere(chi2[:,j] == _m).flatten()
minx_i.append(Rvar[_n])
midy_i = []
for i in range(100):
_l = np.amin(chi2[i])
_m = np.argwhere(chi2[i] <= _l+1).flatten()[0]
midy_i.append(Mvar[_m])
midx_i = []
for j in range(100):
_m = np.amin(chi2[:,j])
_n = np.argwhere(chi2[:,j] <= _m+1).flatten()[0]
midx_i.append(Rvar[_n])
'''
# Distance
for i in range(dim):
# Mass
for j in range(dim):
iter += 1
newPar = parVec.copy()
newPar[0] = Mvar[j]*1E6 # mass
newPar[-1] = Rvar[i]*1E3 # distance
OrbEl = getOrbitalElements(newPar)
if (OrbEl.Period > 0 and OrbEl.Ecc < 1 and OrbEl.Period <= 20):
_FD = getOrbit(SD=SD, Orb=OrbEl, ParamVec=newPar, index=IndexList[0], max_iter=500000, stepsize=1E-8, kwargs=kwargs)
x = returnCombinedError(SD, _FD, IndexList)
chi2[i][j] = x
ProgressBar(iter, dim**2, "chi2= " + str(x))
else:
ProgressBar(iter, dim**2, "chi2= 1E10")
chi2[i][j] = 1E10
print("\nDone!")
'''
'''
_chi2File = open("SMBH/Data/dump/chi2Slice.txt", "w")
for i in range(dim):
for j in range(dim):
_chi2File.write( str(chi2[i][j]) + " " )
_chi2File.write("\n")
_chi2File.close()
'''
_min = np.argwhere(chi2 == np.amin(chi2)).flatten()
_minValue = [ Mvar[_min[1]], Rvar[_min[0]] ]
maxval = np.amax(chi2)
minval = np.amin(chi2)
levels = np.geomspace(minval,maxval, 25)
_fig.clf()
_tf = _fig.add_subplot(1,1,1)
_tf.grid(False)
_tf.set_aspect('auto')
_tf.set_xlabel(r'$R_0$ [kpc]', fontdict={'size':13})
_tf.set_ylabel(r'$M$ [$10^6 M_\odot$]', fontdict={'size':13})
xgit,ygit = np.meshgrid(Rvar, Mvar)
ax = _tf.contourf(xgit.T, ygit.T, chi2, cmap=cm.get_cmap('viridis'), levels=levels)
_fig.colorbar(ax)
_label = r"Min: $M$ [$10^6 M_\odot$]="+str(np.round(_minValue[0],2)) + r", $R_0$ [kpc]=" + str(np.round(_minValue[1],2))
_tf.scatter(_minValue[1], _minValue[0], label=_label, color='red', s=5)
_tf.plot(Rvar,miny_i,color='blue', label='min line')
print(_minValue)
_tf.legend(loc='best')
def determineDeltaOmega(FD:FitContainer) -> list:
startPos = ParVecToParams(FD.returnParVec())[1]
StartEr = startPos / np.linalg.norm(startPos)
#dotList = np.abs( np.dot( FD.PositionArray / np.linalg.norm(FD.PositionArray), StartEr ) )
dotList = np.empty(len(FD.PositionArray))
for i in range(len(FD.PositionArray)):
dot = FD.PositionArray[i] / np.linalg.norm(FD.PositionArray[i])
dotList[i] = ( np.abs( np.dot(dot, StartEr) ) )
    xmin = np.argwhere(dotList <= 1E-2)
#!/usr/bin/env python
"""
Some math for calculating PSFs from pupil functions. All units are in
microns.
Important note - The default for the simulator, and what is also used
in the diagnostics, is a pupil function with a pixel size of 1/2 the
actual pixel size. This was done as it has a more realistic width. If
you use the correct pixel size the PSF will be too narrow. This can
also be handled using OTF scaling as described in the Hanser paper,
but this is more complicated. Also, the pupil function localization
software would need to be updated to include OTF scaling, which it
currently does not support.
This is based on code provided by the Huang Lab at UCSF.
McGorty et al. "Correction of depth-dependent aberrations in 3D
single-molecule localization and super-resolution microscopy",
Optics Letters, 2014.
Another reference for pupil functions is:
Hanser et al. "Phase-retrieved pupil functions in wide-field fluorescence
microscopy", Journal of Microscopy, 2004.
Reference for sample index mismatch aberration:
Liu et al., "Three dimensional single molecule localization using a phase
retrieved pupil function", Optics Express, 2013
Also, thanks to <NAME> for providing his MATLAB code for calculating
vectorial PSFs.
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
"High precision wavefront control in point spread function engineering
for single emitter localization", Optics Express, 26, pp. 8397-8416, 2018.
Hazen 05/19
"""
import math
import numpy
import scipy
import scipy.fftpack
import tifffile
import storm_analysis
import storm_analysis.pupilfn.otf_scaling_c as otfSC
import storm_analysis.pupilfn.pupil_function_c as pfFnC
import storm_analysis.simulator.pf_math_c as pfMathC
class PupilMathException(storm_analysis.SAException):
pass
class Geometry(object):
def __init__(self, size, pixel_size, wavelength, imm_index, NA):
"""
size - The number of pixels in the PSF image, assumed square
and a multiple of 2.
pixel_size - The size of the camera pixel in um.
        wavelength - The wavelength of the fluorescence in um.
        imm_index - The index of the immersion media.
        NA - The numerical aperture of the objective.
"""
super(Geometry, self).__init__()
if not ((size%2)==0):
raise PupilMathException("PF size must be a multiple of 2!")
# imm_index must be larger than the objective NA.
if (imm_index <= NA):
raise PupilMathException("Immersion media index must be larger than objective NA!")
self.imm_index = float(imm_index)
self.NA = float(NA)
self.pixel_size = float(pixel_size)
self.size = int(size)
self.wavelength = float(wavelength)
# Hanser, 2004, page 35.
self.k_max = NA/wavelength
dk = 1.0/(size * pixel_size)
self.r_max = self.k_max/dk
[x,y] = numpy.mgrid[ -self.size/2.0 : self.size/2.0, -self.size/2.0 : self.size/2.0]
# Vectors to use for X/Y translation.
self.kx = x/size
self.ky = y/size
kx = dk * x
ky = dk * y
self.k = numpy.sqrt(kx * kx + ky * ky)
# Hanser, 2004, page 34.
tmp = imm_index/wavelength
# Vector to use for Z translation.
self.kz = numpy.lib.scimath.sqrt(tmp * tmp - self.k * self.k)
self.r = self.k/self.k_max
self.kz[(self.r > 1.0)] = 0.0
self.n_pixels = numpy.sum(self.r <= 1)
self.norm = math.sqrt(self.r.size)
if False:
with tifffile.TiffWriter("kz.tif") as tf:
tf.save(numpy.abs(self.kz).astype(numpy.float32))
tf.save(numpy.angle(self.kz).astype(numpy.float32))
def aberration(self, depth, smp_index):
"""
Models the effect of a refractive index difference between the sample
and the immersion media. See Hanser 2004, equations 4-9.
depth - Point source depth in microns.
smp_index - Refractive index of the sample media.
Returns total aberration function (Hanser 2004, equation 8). Multiply the PF by
this numpy array to include this aberration.
This approach appears to have the problem that at the d = 0 limit it does not
converge to the no aberration PF.
"""
# Use complex numbers to include super critical angle (near field) fluorescence
# effects.
#
sin_theta_1 = (self.wavelength/self.imm_index)*self.k + 0j
# Special handling of the center point where self.k = 0.0, this
# will cause problems because then theta_1 will also be 0.0 and
# we'll end up with 0.0/0.0 when we calculate amp_comp. So instead
# we just use a really small number.
#
cp = int(sin_theta_1.shape[0]/2)
sin_theta_1[cp,cp] = 1.0e-6
sin_theta_2 = (self.imm_index/smp_index)*sin_theta_1 + 0j
theta_1 = numpy.arcsin(sin_theta_1)
theta_2 = numpy.arcsin(sin_theta_2)
amp_trans = (sin_theta_1 * | numpy.cos(theta_2) | numpy.cos |
from math import radians
from numpy.lib.arraysetops import isin
from numpy.lib.polynomial import poly
import topojson as tp
import pickle, random
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
# from imantics import Polygons, Mask, Annotation
from geojson import Feature, Polygon, FeatureCollection, LineString, MultiLineString, MultiPolygon
from skimage import measure
from skimage.feature import canny
import skimage.morphology as sm
from collections import defaultdict
from tqdm import tqdm
from math import sin, cos, atan, pi
from imantics import Mask
from shapely import geometry
# import geopandas as gpd
# world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
# data = world.query('continent == "Africa"')
# r = tp.Topology(data, topology=True).toposimplify(4).to_alt().properties(title='WITH Topology')
palette = eval(open('data/parkinglot/color.json', 'r').read())
CLASSES = ('road', 'curb', 'obstacle', 'chock', 'parking_line', 'road_line', 'vehicle')
PALETTE = [(0, 0, 0), (0, 255, 255), (0, 255, 0), (255, 0, 0), (0, 0, 255), (0, 128, 255), (128, 128, 128)]
tolerance = 1
draw_img = True
img_path = 'temp/test.jpg'
save_svg = False
def polygonize(result, tolerance=1, draw_img=None):
# convert bitmap to polygon
polygons_data = extractPolygons(result)
# sort by area from large to small
polygons_data.sort(key=lambda p: p['area'], reverse=True)
# join vertex of polygons
snapPolygonPoints(polygons_data, result) # snap points
#simplify polygons using topo
topo_data = [Feature(
geometry=Polygon([p['polygon']]),
properties={"name": p['label']}
) for p in polygons_data]
fc = FeatureCollection(topo_data)
topo = tp.Topology(fc, prequantize=True, topology=True, shared_coords=True)
topo_s = topo.toposimplify(
epsilon=tolerance,
simplify_algorithm='dp',
)
# convert to desired structure
polygons_strctured = get_polygon_dict(topo_s)
return polygons_strctured
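# (hedged) Example of running the pipeline on a single-channel label mask,
# mirroring the __main__ block below; each pixel value indexes into CLASSES:
#
#   result = np.asarray(Image.open('temp/mask.png'))
#   polygons = polygonize(result, tolerance=1)
#   for label, polys in polygons.items():
#       print(label, len(polys))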
def applyMorph(result):
## Image morphic operation
# The morphological opening on an image is defined as an erosion followed by a dilation.
# Opening can remove small bright spots (i.e. “salt”) and connect small dark cracks.
# This tends to “open” up (dark) gaps between (bright) features.
    result = sm.opening(result, sm.disk(2)) # opening with a disk of radius 2 removes small bright spots
    result = sm.dilation(result, sm.disk(tolerance)) # dilate with a disk of radius `tolerance`
    result = sm.closing(result, sm.disk(2))
return result
#util
def getBoxAreaAndCenter(points):
center = (points[:,0].mean(), points[:,1].mean())
area = cv2.contourArea(points.astype(np.float32))
return area, center
def extractPolygons(result):
# find polygon
temp_polygons = []
polygons_data = []
for label in palette:
i = CLASSES.index(label)
mask = np.where(result==i, 1, 0).astype(np.uint8)
# using imantics
# polygons = Mask(mask).polygons().points
# Using CV2
mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
polygons = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE, offset=(-1, -1))
polygons = polygons[0] if len(polygons) == 2 else polygons[1]
polygons = [polygon.squeeze() for polygon in polygons]
# using skimage
# mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
# polygons = measure.find_contours(mask, 0.8)
# polygons = [np.vstack([p[:,1]-1, p[:,0]-1]).T for p in polygons]#reverse for skimage
print(f'{label}:{len(polygons)}')
for j, polygon in enumerate(polygons):
# remove duplicated polygon
if not any([polygon.shape == p.shape and polygon.sum() == p.sum() for p in temp_polygons]):
temp_polygons.append(polygon)
else:
print(f'find duplicated polygon: {label}({j})')
continue
if polygon.shape[0] <= 2:
continue
area, center = getBoxAreaAndCenter(polygon)
polygon_approximated = measure.approximate_polygon(polygon, tolerance)
label2 = label+str(j)
if area >= 100:
polygons_data.append({
'label': label,
'polygon': polygon.tolist(),
# 'topo': polygon_feat,
'approximated': polygon_approximated,
'area': area,
'center': center,
'label2': label2
})
else:
print(f'---> Small region {label} with area:{area} and length:{polygon.shape[0]}')
return polygons_data
def snapPolygonPoints(polygons_data:list, mask:np.ndarray):
    ''' create a lookup table to register polygon vertices
    with value at [x, y, 0] -> index i of the polygon owning the vertex and
    with value at [x, y, 1] -> index j of that vertex within polygon i
    '''
vertices_lookup = np.full(list(mask.shape[::-1])+[2], fill_value=-1)
polygons = [i['polygon'] for i in polygons_data]
labels = [i['label'] for i in polygons_data]
label_indices = [CLASSES.index(l) for l in labels]
#check polygon joint
checkJointFromPolygon(polygons, mask)
# utils
radius = 1
x_upper, y_upper = mask.shape[::-1]
_validate_coord = lambda x,y: x >= 0 and y >= 0 and x < x_upper and y < y_upper
_lookup = lambda x,y,i=0: vertices_lookup[x, y, i] if _validate_coord(x,y) else None
_lookup_check_pair = lambda x,y,label: _lookup(x, y) is not None and _lookup(x, y) != -1 and _lookup(x, y) != label
# _lookup_range = lambda x0, x1, y0, y1: np.array([[_lookup(x, y) for x in range(x0,x1)] for y in range(y0, y1)])
_lookup_scope = lambda x, y: np.array([[_lookup(x, y) for x in range(x-3, x+4)] for y in range(y-3, y+4)])
_lookup_mask = lambda x,y: mask[y,x] if _validate_coord(x,y) else None # need to reverse x,y from OpenCV to NumPy
_mask_scope = lambda x,y: np.array([[_lookup_mask(x_,y_) for x_ in range(x-3, x+4)] for y_ in range(y-3, y+4)])
_point_distance = lambda x1, y1, x2, y2: abs(x2-x1) + abs(y2-y1)
def _get_valid_points(x, y, label_index, radius = 1):
valid_points = set()
# search valid points from offsets
offsetss = [[(i,j) for i in range(-radius, radius+1)] for j in range(-radius, radius+1)]
for offsets in offsetss:
for offset in offsets:
x_i, y_i = x+offset[0], y+offset[1]
if _point_distance(x,y, x_i, y_i) > radius or (x,y)==(x_i,y_i):
continue
if _lookup_check_pair(x_i, y_i, label_index):
valid_points.add((x_i, y_i))
return valid_points
# create lookup table
for i, polygon in enumerate(polygons):
for j, (x, y) in enumerate(polygon):
x, y = round(x), round(y)
assert _validate_coord(x, y)
if _lookup(x, y) != -1:
vertices_lookup[x, y] = -1
continue
vertices_lookup[x, y, :] = [i,j]
points_snaped = []
points_missed = []
# search for vertices to merge
for i, polygon in tqdm(enumerate(polygons)):
label_index = label_indices[i]
for j, (x, y) in enumerate(polygon):
x, y = round(x), round(y)
if _lookup(x, y) == -1:
continue #snapped
assert _lookup(x, y) == i
x2, y2 = polygon[(j+1)%len(polygon)]#next point
x2, y2 = round(x2), round(y2)
# get target coordinate
# check whether mask is outside and
# check whether candidate coordinates next another polygon's vertex
x_t, y_t = None, None
valid_points = _get_valid_points(x, y, i)
# check for valid points
if len(valid_points) >=2:
valid_points2 = _get_valid_points(x2, y2, i)
vp1 = valid_points - valid_points2
# vp2 = valid_points2 - valid_points
vp3 = valid_points & valid_points2
if len(vp1)>=1 and len(valid_points2)>=1:
valid_points = vp1
elif len(vp3) == 2:
valid_points = {vp3.pop()}
else:
valid_points = set()
# exception
if len(valid_points) == 0:
# print(f'No adjacent vertice found at [{i}]({x},{y}):\n{_lookup_scope(x, y)}')
points_missed.append((i, j, (x, y)))
continue
#snap points
valid_points.add((x,y))
vertices = np.array(list(valid_points))
x_a, y_a = vertices.mean(axis = 0).tolist()
for x_t, y_t in valid_points:
j_t = _lookup(x_t, y_t) #find j'th polygon and n'th coordinates
n_t = _lookup(x_t, y_t, 1) #find n'th point
# if (x,y) != (x_t, y_t):
# print(f'Found adjacent vertice [{i}]({x},{y})<->[{j_t}]({x_t},{y_t})')
polygons[j_t][n_t] = [x_a, y_a]
# remove from lookup table
vertices_lookup[x_t, y_t, 0] = -1
points_snaped.append((j_t, n_t, (x, y)))
#make sure all points are cleared
print(f'{len(points_snaped)} points snapped and {len(points_missed)} points left ({len(points_snaped)/(len(points_missed)+len(points_snaped))*100:.1f}%)')
def get_polygon_dict(topo):
polygons_strctured = defaultdict(list)
topo_json = eval(topo.to_geojson())
for geo in topo_json['features']:
label = geo['properties']['name']
polygon = geo['geometry']['coordinates'][0]
polygons_strctured[label].append(polygon)
return polygons_strctured
def checkJointFromPolygon(polygons, mask, draw_single_points=False):
point_count = np.zeros(mask.shape[::-1])
for i, polygon in enumerate(polygons):
polygon_np = np.zeros_like(point_count)
for (x,y) in polygon:
_x, _y = round(x), round(y)
point_count[_x, _y] += 1
polygon_np[_x, _y] = 1
# Image.fromarray(np.where(polygon_np==1, 255, 0).astype(np.uint8)).save(f'tmp/{i}.png')
singles = (point_count == 1).sum()
point_o = point_count.sum() - singles
# single_p_np = np.zeros_like(point_count)
# single_p_np[point_count == 1] = 255
if draw_single_points:
draw_np = np.where(point_count>1, 128, 0)
draw_np[point_count == 1] = 255
Image.fromarray(draw_np.astype(np.uint8), mode='P').save('temp/single_points.png')
print(f'There are {point_o} points joined({point_o/(singles+point_o)*100:.1f}%)')
# draw image for debugging
def draw_polygon(label, color, polygon, draw):
# fnt = ImageFont.truetype("Arial.ttf", 20)
p = [tuple(i) for i in polygon]
polygon = np.array(polygon)
center = [polygon[:,0].mean(), polygon[:,1].mean()]
center[0] -= 5*len(label)
center[1] -= 10
color2 = tuple(list(color)+[128]) #transparent
color3 = tuple([150-c for c in color]) #invert
# draw.point(polygon3, fill=color3)
# draw.line(polygon3, width=1, fill=color, joint='curve')
draw.polygon(p, fill=color2, outline=color)
draw.point(p, fill=color3)
draw.text(center, label, fill=color)
def drawResults(polygons_data, img_path, mode=None):
img = Image.open(img_path)
# im = img.convert(mode='RGBA')
draw = ImageDraw.Draw(img, mode='RGBA')
# im2 = img.convert(mode='RGBA')
img2 = img.copy()
draw2 = ImageDraw.Draw(img2, mode='RGBA')
# draw approximated polygon (inferior)
img_path = img_path + f'_result_mode{mode}.png'
if mode == 1: # draw original prediction
for data in polygons_data:
label = data['label']
label2 = data['label2']
polygon1 = data['polygon']
color = PALETTE[CLASSES.index(label)]
draw_polygon(label2, color, polygon1, draw)
img.save(img_path) #mask -> polygon
elif mode == 2: # draw prediction using approximation
for data in polygons_data:
label = data['label']
label2 = data['label2']
polygon2 = data['approximated']
color = PALETTE[CLASSES.index(label)]
draw_polygon(label2, color, polygon2, draw2)
        img2.save(img_path) #mask -> approximate polygon
# draw image: mask -> topo -> simplified polygon
elif mode == 3:
img3 = img.copy()
draw3 = ImageDraw.Draw(img3, mode='RGBA')
for label, polygons in polygons_data.items():
for polygon in polygons:
color = PALETTE[CLASSES.index(label)]
draw_polygon(label, color, polygon, draw3)
img3.save(img_path) #mask -> topo -> simplified polygon
# draw original topo graph
elif mode == 4:
# img4 = Image.fromarray(np.zeros_like(result.astype(np.uint8))).convert(mode='RGB')
img4 = img.copy()
draw4 = ImageDraw.Draw(img4, mode='RGBA')
for label, polygons in polygons_data.items():
for polygon in polygons:
color = PALETTE[CLASSES.index(label)]
draw_polygon(label, color, polygon, draw4)
img4.save(img_path) #mask -> topo polygon
else:
raise Exception(f'Unexpected mode: {mode}')
return img_path
if __name__ == '__main__':
## Load data
# load from topo_data
# data = pickle.load(open('topo_data', 'rb'))
# load from mask
polygons_data = []
img = Image.open('temp/mask.png')
    result = np.asarray(img)
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from past.utils import old_div
import unittest
from anuga.structures.boyd_box_operator import Boyd_box_operator
from anuga.structures.boyd_box_operator import boyd_box_function
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water.shallow_water_domain import Domain
import numpy
verbose = False
class Test_boyd_box_operator(unittest.TestCase):
"""
Test the boyd box operator, in particular the discharge_routine!
"""
def setUp(self):
pass
def tearDown(self):
pass
def _create_domain(self,d_length,
d_width,
dx,
dy,
elevation_0,
elevation_1,
stage_0,
stage_1,
xvelocity_0 = 0.0,
xvelocity_1 = 0.0,
yvelocity_0 = 0.0,
yvelocity_1 = 0.0):
points, vertices, boundary = rectangular_cross(int(old_div(d_length,dx)), int(old_div(d_width,dy)),
len1=d_length, len2=d_width)
domain = Domain(points, vertices, boundary)
domain.set_name('Test_Outlet_Inlet') # Output name
domain.set_store()
domain.set_default_order(2)
domain.H0 = 0.01
domain.tight_slope_limiters = 1
#print 'Size', len(domain)
#------------------------------------------------------------------------------
# Setup initial conditions
#------------------------------------------------------------------------------
def elevation(x, y):
"""Set up a elevation
"""
            z = numpy.zeros(x.shape, dtype='d')
#!/usr/bin/env python3
#
# TImestream DAta Storage (TIDAS).
#
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file. All rights
# reserved. Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# WARNING: Running this script will generate several GB of data...
# This is just a toy exercise. We assume continuous data collection with
# no gaps. We use very simple types of detector and housekeeping data.
# In a "real" dataset, we would be unpacking raw frame data from a data
# acquisition system and would know how many samples we have for a given
# observation. Here we just make these numbers up.
# THIS IS JUST AN EXAMPLE.
import os
import sys
import shutil
import numpy as np
import datetime
import calendar
import tidas
from tidas import DataType as tdt
# The name of the volume
path = "demo_telescope"
# Create the schemas for the data groups that we will have for each day
# ---- Data from a weather station ----
wfields = list()
wfields.append(tidas.Field("windspeed", tdt.float32, "Meters / second"))
wfields.append(tidas.Field("windangle", tdt.float32, "Degrees"))
wfields.append(tidas.Field("temperature", tdt.float32, "Degrees Celsius"))
wfields.append(tidas.Field("pressure", tdt.float32, "Millibars"))
wfields.append(tidas.Field("humidity", tdt.float32, "Percent"))
wfields.append(tidas.Field("PWV", tdt.float32, "mm"))
weather_schema = tidas.Schema(wfields)
# sampled every 10 seconds
weather_rate = 1.0 / 10.0
weather_daysamples = int(24.0 * 3600.0 * weather_rate)
# ---- Housekeeping data ----
hfields = list()
hfields.append(tidas.Field("thermo1", tdt.float32, "Degrees Kelvin"))
hfields.append(tidas.Field("thermo2", tdt.float32, "Degrees Kelvin"))
hk_schema = tidas.Schema(hfields)
# sampled once per minute
hk_rate = 1.0 / 60.0
hk_daysamples = int(24.0 * 3600.0 * hk_rate)
# ---- Pointing data ----
pfields = list()
pfields.append(tidas.Field("az", tdt.float32, "Radians"))
pfields.append(tidas.Field("el", tdt.float32, "Radians"))
pfields.append(tidas.Field("psi", tdt.float32, "Radians"))
pointing_schema = tidas.Schema(pfields)
# sampled at 20 Hz
pointing_rate = 20.0
pointing_daysamples = int(24.0 * 3600.0 * pointing_rate)
# ---- Detector data ----
ndet = 50
dfields = list()
for d in range(ndet):
detname = "det_{:04d}".format(d)
dfields.append(tidas.Field(detname, tdt.int16, "ADU"))
det_schema = tidas.Schema(dfields)
# sampled at 100Hz
det_rate = 100.0
det_daysamples = int(24.0 * 3600.0 * det_rate)
day_seconds = 24 * 3600
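# At the rates above one day holds 8640 weather samples, 1440 housekeeping
# samples, 1728000 pointing samples and 8640000 samples per detector field,
# which is why only 3 days of data are written below.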
# Remove the volume if it exists
if os.path.isdir(path):
shutil.rmtree(path)
# Create the volume all at once. To keep the size of the volume
# reasonable for this demo, only write 3 days of data.
vol = tidas.Volume(path, tidas.BackendType.hdf5, tidas.CompressionType.none, dict())
# Get the root block of the volume
root = vol.root()
volstart = datetime.datetime(2018, 1, 1)
volstartsec = volstart.timestamp()
for year in ["2018"]:
# Add a block for this year
yb = root.block_add(year, tidas.Block())
for monthnum in range(1, 2):
# Add a block for the month
month = calendar.month_abbr[monthnum]
mb = yb.block_add(month, tidas.Block())
weekday, nday = calendar.monthrange(int(year), monthnum)
for dy in range(1, 4):
daystart = datetime.datetime(int(year), monthnum, dy)
daystartsec = (daystart - volstart).total_seconds() + volstartsec
# Add a block for the day
day = "{:02d}".format(dy)
db = mb.block_add(day, tidas.Block())
# Just fake some seed for now
seed = int(year) * 1000000 + monthnum * 10000 + dy * 100
np.random.seed(seed)
# Now we are going to add the data groups for this day.
print("{}-{}-{:02d}:".format(year, month, dy))
print(" writing weather data")
weather = tidas.Group(
weather_schema, tidas.Dictionary(), weather_daysamples
)
weather = db.group_add("weather", weather)
weather.write_times(
0,
np.linspace(
daystartsec, daystartsec + day_seconds, num=weather_daysamples
),
)
data = np.absolute(
np.random.normal(loc=0.0, scale=5.0, size=weather_daysamples)
).astype(np.float32)
weather.write("windspeed", 0, data)
data = 360.0 * np.absolute(
np.random.random(size=weather_daysamples)
).astype(np.float32)
weather.write("windangle", 0, data)
data = np.absolute(
np.random.normal(loc=25.0, scale=5.0, size=weather_daysamples)
).astype(np.float32)
weather.write("temperature", 0, data)
data = np.absolute(
np.random.normal(loc=1013.25, scale=30.0, size=weather_daysamples)
).astype(np.float32)
weather.write("pressure", 0, data)
data = np.absolute(
np.random.normal(loc=30.0, scale=10.0, size=weather_daysamples)
).astype(np.float32)
weather.write("humidity", 0, data)
data = np.absolute(
np.random.normal(loc=10.0, scale=5.0, size=weather_daysamples)
).astype(np.float32)
weather.write("PWV", 0, data)
print(" writing housekeeping data")
hk = tidas.Group(hk_schema, tidas.Dictionary(), hk_daysamples)
hk = db.group_add("hk", hk)
hk.write_times(
0,
np.linspace(daystartsec, daystartsec + day_seconds, num=hk_daysamples),
)
data = np.random.normal(loc=273.0, scale=5.0, size=hk_daysamples).astype(
np.float32
)
hk.write("thermo1", 0, data)
            data = np.random.normal(loc=77.0, scale=5.0,
                                    size=hk_daysamples).astype(np.float32)
            hk.write("thermo2", 0, data)
'''
Particle filter class
'''
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from numba import jit, njit, prange
from numpy import pi
from numpy.linalg import norm
from numpy.random import randn, uniform
from scipy.stats import norm as statsnorm
from .stats import percentile
@njit(cache=True)
def _weightedMean(particles, weights):
'''
Weighted mean of particles
'''
temp = 0.0
temp2 = 0.0
for i in prange(particles.shape[0]):
temp += particles[i]*weights[i]
temp2 += weights[i]
temp2 += 1.e-300 # avoid round-off to zero
if temp2 >= 1e-300:
return temp/temp2
else:
return 0.0
@njit(cache=True)
def _weightedVar(mean, particles, weights):
'''
Weighted variance of particles
'''
temp = 0.0
temp2 = 0.0
for i in prange(particles.shape[0]):
temp += (particles[i] - mean)**2*weights[i]
temp2 += weights[i]
temp2 += 1.e-300 # avoid round-off to zero
if temp2 >= 1e-300:
return temp/temp2
else:
return 0.0
@njit(cache=True)
def _systematicResample(weights, indexes, randomnumber):
    ''' Performs the systematic resampling algorithm used by particle filters.
    This algorithm separates the sample space into N divisions. A single random
    offset is used to choose where to sample from for all divisions. This
guarantees that every sample is exactly 1/N apart.
Parameters
----------
weights : list-like of float
list of weights as floats
'''
N = weights.shape[0]
# make N subdivisions, and choose positions with a consistent random offset
positions = (randomnumber + np.arange(N)) / N
cumulative_sum = np.cumsum(weights)
i, j = 0, 0
while i < N:
if positions[i] < cumulative_sum[j]:
indexes[i] = j
i += 1
else:
j += 1
def _normPdf(mu, var, z):
'''
Gaussian normal PDF
'''
return 1.0/((2*np.pi*var)**0.5)*np.exp(-(z - mu)**2/(2*var))
class ParticleFilter():
'''
A particle filter class
Parameters
----------
N : int
Number of particles
R : float or array_like
Variance of measured states
len(R) == len(measuredStates)
Q : float or array_like
Variance of actuation error
Part of model
    model : function(u, states, parameters, statesDerivative, Q)
Model that generates next step of states
using previous states, parameters and Q
statesDerivative can be used as a placeholder for the derivative
Example:
def model(u, states, parameters, statesDerivative, Q):
m = parameters[:, 0]
k = parameters[:, 1]
c = parameters[:, 2]
dt = 1.0
statesDerivative[:, 0] = states[:, 1]
            statesDerivative[:, 1] = 1.0/m*(-k*states[:, 0] - c*states[:, 1] + u + randn(states.shape[0])*np.sqrt(Q))
states[:, 0] += statesDerivative[:, 0]*dt
states[:, 1] += statesDerivative[:, 1]*dt
nStates : int
Number of states in the system
nParameters : int
Number of parameters in the system
measuredStates : int or array_like
        Which state numbers are measured.
Could be a single number or multiple in a list.
Observation (z) must have the same length.
'''
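    # (hedged) Example construction for a 2-state, 3-parameter oscillator
    # measured through its first state; `model` follows the signature shown
    # in the docstring above and all constants are illustrative:
    #
    #   pf = ParticleFilter(N=1000, R=0.1, Q=0.01, model=model,
    #                       nStates=2, nParameters=3, measuredStates=0)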
def __init__(self, N, R, Q, model, nStates, nParameters, measuredStates, resampleAlways=False, resampleDebug=False):
self.N = N
if not type(R) == list and not type(R) == np.ndarray:
self.R = [R]
else:
self.R = R
if not type(Q) == list and not type(Q) == np.ndarray:
self.Q = [Q]
else:
self.Q = Q
self.model = model
self.nStates = nStates
self.nParameters = nParameters
        self.particles = np.empty((self.N, self.nParameters + self.nStates))
"""
Functionalities related to time-domain modelling using a frequency-domain code.
"""
# Copyright 2018-2021 The emsig community.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import warnings
import numpy as np
from scipy.interpolate import PchipInterpolator as Pchip
from scipy.interpolate import InterpolatedUnivariateSpline as Spline
try:
import empymod
except ImportError:
empymod = None
from emg3d import utils
__all__ = ['Fourier', ]
@utils._requires('empymod')
class Fourier:
r"""Time-domain CSEM computation.
Class to carry out time-domain modelling with the frequency-domain code
``emg3d`` following [WeMS21]_. Instances of the class take care of
computing the required frequencies, the interpolation from coarse,
limited-band frequencies to the required frequencies, and carrying out the
actual transform.
Everything related to the Fourier transform is done by utilising the
capabilities of the 1D modeller :mod:`empymod`. The input parameters
``time``, ``signal``, ``ft``, and ``ftarg`` are passed to the function
:func:`empymod.utils.check_time` to obtain the required frequencies. The
actual transform is subsequently carried out by calling
:func:`empymod.model.tem`. See these functions for more details about the
exact implementations of the Fourier transforms and its parameters. Note
that also the ``verb``-argument follows the definition in ``empymod``.
The mapping from computed frequencies to the frequencies required for the
Fourier transform is done in three steps:
- Data for :math:`f>f_\mathrm{max}` is set to 0+0j.
- Data for :math:`f<f_\mathrm{min}` is interpolated by adding an additional
data point at a frequency of 1e-100 Hz. The data for this point is
``data.real[0]+0j``, hence the real part of the lowest computed
frequency and zero imaginary part. Interpolation is carried out using
PCHIP from :func:`scipy.interpolate.pchip_interpolate`.
- Data for :math:`f_\mathrm{min}\le f \le f_\mathrm{max}` is computed
with cubic spline interpolation (on a log-scale) using
:class:`scipy.interpolate.InterpolatedUnivariateSpline`.
.. note::
The package ``empymod`` has to be installed in order to use
``Fourier``:
``pip install empymod`` or ``conda install -c conda-forge empymod``.
Parameters
----------
time : ndarray
Desired times (s).
fmin, fmax : float
Minimum and maximum frequencies (Hz) to compute:
- Data for freq > fmax is set to 0+0j.
- Data for freq < fmin is interpolated, using an extra data-point at
f = 1e-100 Hz, with value data.real[0]+0j. (Hence zero imaginary
part, and the lowest computed real value.)
signal : {-1, 0, 1}, default: 0
Source signal:
- -1 : Switch-off time-domain response
- 0 : Impulse time-domain response
- +1 : Switch-on time-domain response
ft : {'sin', 'cos', 'fftlog'}, default: 'sin'
Flag to choose either the Digital Linear Filter method (Sine- or
Cosine-Filter) or the FFTLog for the Fourier transform.
ftarg : dict, default depends on ``ft``
Fourier transform arguments.
- If ``ft='dlf'``:
- ``dlf``: string of filter name in :mod:`empymod.filters` or the
filter method itself; default: ``'key_201_CosSin_2012'``.
- ``pts_per_dec``: points per decade; default: -1.
- If 0: Standard DLF;
- If < 0: Lagged Convolution DLF;
- If > 0: Splined DLF.
- If ``ft='fftlog'``:
- ``pts_per_dec``: samples per decade; default: 10.
- ``add_dec``: additional decades [left, right]; default: [-2, 1].
- ``q``: exponent of power law bias, -1 <= q <= 1 ; default: 0.
input_freq : ndarray, default: None
Frequencies to use for computation. Mutually exclusive with
``every_x_freq``.
every_x_freq : int, default: None
Every ``every_x_freq``-th frequency of the required frequency-range is
used for computation. Mutually exclusive with ``input_freq``.
"""
def __init__(self, time, fmin, fmax, signal=0, ft='dlf', ftarg=None,
**kwargs):
"""Initialize a Fourier instance."""
# Store the input parameters.
self._time = time
self._fmin = fmin
self._fmax = fmax
self._signal = signal
self._ft = ft
self._ftarg = {} if ftarg is None else ftarg
self._input_freq = kwargs.pop('input_freq', None)
self._every_x_freq = kwargs.pop('every_x_freq', None)
self.verb = kwargs.pop('verb', 3)
# Ensure no kwargs left.
if kwargs:
raise TypeError(f"Unexpected **kwargs: {list(kwargs.keys())}.")
# Ensure input_freq and every_x_freq are not both set.
self._check_coarse_inputs(keep_inp_freq=True)
# Get required frequencies.
self._check_time()
def __repr__(self):
"""Simple representation."""
return (f"{self.__class__.__name__}: {self._ft}; "
f"{self.time.min()}-{self.time.max()} s; "
f"{self.fmin}-{self.fmax} Hz")
# PURE PROPERTIES
@property
def freq_required(self):
"""Frequencies required to carry out the Fourier transform."""
return self._freq_req
@property
def freq_coarse(self):
"""Coarse frequency range, can be different from `freq_required`."""
# If none of {every_x_freq, input_freq} given, then
# freq_coarse = freq_required.
if self.every_x_freq is None and self.input_freq is None:
return self.freq_required
# If input_freq given, then freq_coarse = input_freq.
elif self.every_x_freq is None:
return self.input_freq
# If every_x_freq given, get subset of freq_required.
else:
return self.freq_required[::self.every_x_freq]
@property
def ifreq_compute(self):
"""Indices of `freq_coarse` which have to be computed."""
return ((self.freq_coarse >= self.fmin) &
(self.freq_coarse <= self.fmax))
@property
def freq_compute(self):
"""Frequencies at which the model has to be computed."""
return self.freq_coarse[self.ifreq_compute]
@property
def ifreq_extrapolate(self):
"""Indices of the frequencies to extrapolate."""
return self.freq_required < self.fmin
@property
def freq_extrapolate(self):
"""These are the frequencies to extrapolate.
In the end it is done via interpolation, using an extra data-point at
f = 1e-100 Hz, with value data.real[0]+0j. (Hence zero imaginary part,
and the lowest computed real value.)
"""
return self.freq_required[self.ifreq_extrapolate]
@property
def ifreq_interpolate(self):
"""Indices of the frequencies to interpolate."""
return ((self.freq_required >= self.fmin) &
(self.freq_required <= self.fmax))
@property
def freq_interpolate(self):
"""These are the frequencies to interpolate.
If ``freq_required`` is equal ``freq_coarse``, then this is equal to
``freq_compute``.
"""
return self.freq_required[self.ifreq_interpolate]
@property
def ft(self):
"""Type of Fourier transform.
Set via ``fourier_arguments(ft, ftarg)``.
"""
return self._ft
@property
def ftarg(self):
"""Fourier transform arguments.
Set via ``fourier_arguments(ft, ftarg)``.
"""
return self._ftarg
# PROPERTIES WITH SETTERS
@property
def time(self):
"""Desired times (s)."""
return self._time
@time.setter
def time(self, time):
"""Update desired times (s)."""
self._time = time
self._check_time()
@property
def fmax(self):
"""Maximum frequency (Hz) to compute."""
return self._fmax
@fmax.setter
def fmax(self, fmax):
"""Update maximum frequency (Hz) to compute."""
self._fmax = fmax
self._print_freq_calc()
@property
def fmin(self):
"""Minimum frequency (Hz) to compute."""
return self._fmin
@fmin.setter
def fmin(self, fmin):
"""Update minimum frequency (Hz) to compute."""
self._fmin = fmin
self._print_freq_calc()
@property
def signal(self):
"""Signal in time domain {-1, 0, 1}."""
return self._signal
@signal.setter
def signal(self, signal):
"""Update signal in time domain {-1, 0, 1}."""
self._signal = signal
@property
def input_freq(self):
"""If set, freq_coarse is set to input_freq."""
return self._input_freq
@input_freq.setter
def input_freq(self, input_freq):
"""Update input_freq. Erases every_x_freq if set."""
self._input_freq = input_freq
self._check_coarse_inputs(keep_inp_freq=True)
@property
def every_x_freq(self):
"""If set, freq_coarse is every_x_freq-frequency of freq_required."""
return self._every_x_freq
@every_x_freq.setter
def every_x_freq(self, every_x_freq):
"""Update every_x_freq. Erases input_freq if set."""
self._every_x_freq = every_x_freq
self._check_coarse_inputs(keep_inp_freq=False)
# OTHER STUFF
def fourier_arguments(self, ft, ftarg):
"""Set Fourier type and its arguments."""
self._ft = ft
self._ftarg = ftarg
self._check_time()
def interpolate(self, fdata):
"""Interpolate from computed data to required data.
Parameters
----------
fdata : ndarray
Frequency-domain data corresponding to ``freq_compute``.
Returns
-------
full_data : ndarray
Frequency-domain data corresponding to ``freq_required``.
"""
# Pre-allocate result.
out = np.zeros(self.freq_required.size, dtype=np.complex128)
# 1. Interpolate between fmin and fmax.
# If freq_coarse is not exactly freq_required, we use cubic spline to
# interpolate from fmin to fmax.
if self.freq_coarse.size != self.freq_required.size:
int_real = Spline(np.log(self.freq_compute),
fdata.real)(np.log(self.freq_interpolate))
            int_imag = Spline(np.log(self.freq_compute),
                              fdata.imag)(np.log(self.freq_interpolate))
#!/usr/bin/env python
# coding: utf-8
import math
import random
import warnings
import matplotlib
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform, poisson
from statsmodels.distributions.empirical_distribution import ECDF
def discrete_weibull(shape, scale, N2):
x_pre_scale = np.random.weibull(shape, int(5e6))
x = scale * x_pre_scale
f = ECDF(x)
h = np.zeros(N2)
h[0] = f(1.5) - f(0)
for i in range(1, N2):
h[i] = (f(i+1.5) - f(i+0.5)) / (1-f(i+0.5))
    s = np.zeros(N2)
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# - <NAME>
# <<EMAIL>>, <<EMAIL>>
# - <NAME>
# <<EMAIL>>, <<EMAIL>>
# Language: python2.7
import numpy as np
np.random.seed(1337) # make the results reproducible
from math import floor
'''
Synthetic dataset for classification rules
x = (x1, x2, x3, x4)
x1, x2 ~ U[0, 1]
x3 = {0, 1}
x4 = {blue -1, white 0, red 1}
y = 0, 1 ou 2
y = 0 <= r01 = {x4: red, x3: 1};
r02 = {x4: red, x3: 0, x2 <= 0.5};
r03 = {x4: blue or white, x1 >= 0.7, x3: 0, x2 > 0.2};
r04 = {x4: white, x1: ]0.5, 0.7[}
y = 1 <= r11 = {x4: red, x3: 0, x2 > 0.5};
         r12 = {x4: blue or white, x1 >= 0.7, x3: 0, x2 <= 0.2};
r13 = {x4: white, x1 <= 0.5}
y = 2 <= r21 = {x4: blue or white, x1 >= 0.7, x3: 1}
[x4-red]
| |
[x1>=0.7] [x3=0]
| | | |
[x1-blue] [x3=1] [y=0] [x2<=0.5]
| | | | | |
[x1<=0.5] [y=2] [x2<=2][y=2] [y=1] [y=0]
| | | |
[y=0] [y=1] [y=0][y=1]
'''
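# (hedged) Example: draw 1000 samples without noise and 1000 with a uniform
# 5 % classification error on every rule (values illustrative):
#
#   clean = DecisionTreeSamples(1000)
#   noisy = DecisionTreeSamples(1000, e=5)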
class DecisionTreeSamples():
def __init__(self, n, e=None):
'''
Initialize the synthetic dataset (X, y) based on the synthetic decision
tree.
Parameters:
- n: number of samples to generate
        - e: amount of noise or impurity to introduce into the rules, in the
          form of a classification error expressed in percentage, None by default.
The same or a different amount of noise can be introduced for each
rule:
*To introduce different amount of noise for each rule, e must be
as a dictionary with {'r01': e1, 'r02': e2, 'r03': e3, 'r04': e04,
'r11': e11, 'r12': e12, 'r13': e13, 'r21': e21} where 'r01', 'r02',
etc. correspond to the name of the rules (see above) and e1, e2, etc.
the noise specific to each one of these rules.
            *To introduce the same amount of noise, e must be an int or a float.
'''
# Generate synthetic dataset (X, y)
        X = np.zeros((n, 4))
#!/usr/bin/env python2.7
from numpy import (zeros, empty, multiply, copyto, asarray)
from numexpr import evaluate
import pylab as py
import urllib
grid_shape = (512, 512)
def roll_add(rollee, shift, axis, out):
if shift == 1 and axis == 0:
out[1:, :] += rollee[:-1,:]
out[0, :] += rollee[-1,:]
elif shift == -1 and axis == 0:
out[:-1, :] += rollee[1:,:]
out[-1, :] += rollee[0,:]
elif shift == 1 and axis == 1:
out[:, 1:] += rollee[:, :-1]
out[:, 0] += rollee[:, -1]
elif shift == -1 and axis == 1:
out[:, :-1] += rollee[:, 1:]
out[:, -1] += rollee[:, 0]
def laplacian(grid, out):
    copyto(out, grid)
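    # (hedged) In the usual five-point Laplacian stencil this continues
    # roughly as:
    #   out *= -4.0
    #   roll_add(grid, +1, 0, out)
    #   roll_add(grid, -1, 0, out)
    #   roll_add(grid, +1, 1, out)
    #   roll_add(grid, -1, 1, out)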
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: Estimate local time
version: 0.0.3
created: 2018-04-30
author: <NAME>
dependencies:
* tidepool-data-env (install using anaconda, see readme for details)
* wikipedia-timezone-aliases-2018-04-28.csv
license: BSD-2-Clause
TODO:
* [] see readme file
"""
# %% REQUIRED LIBRARIES
import pandas as pd
import numpy as np
import os
import sys
from pytz import timezone
from datetime import timedelta
import datetime as dt
import argparse
# %% USER INPUTS
codeDescription = "Estimate local time for each data point in the dataset"
codeVersion = "0.0.3"
parser = argparse.ArgumentParser(description=codeDescription)
parser.add_argument("-i",
"--input-data-file",
dest="inputFilePathAndName",
default="example-csv.csv",
help="csv, xlsx, or json file that contains Tidepool data")
parser.add_argument("--deprecated-timezone-list",
dest="timezoneAliasesFilePathAndName",
default="wikipedia-timezone-aliases-2018-04-28.csv",
help="a .csv file that contains a list of deprecated " +
"timezones and their alias")
parser.add_argument("-o",
"--output-data-path",
dest="outputPath",
default=os.path.join("output",
"dataWithLocalTimeEstimates"),
help="the output where the data is stored")
parser.add_argument("--day-series-output-path",
dest="daySeriesOutputPath",
default=os.path.join("output", "daySeriesData"),
help="optional path to store the contiguous day series" +
"data. If no path is specified, then data is not saved")
args = parser.parse_args()
# %% FUNCTIONS
def filterByDates(df, startDate, endDate):
# filter by qualified start & end date, and sort
df = \
df[(df.time >= startDate) &
(df.time <= (endDate + "T23:59:59"))]
return df
def convertDeprecatedTimezoneToAlias(df, tzAlias):
if "timezone" in df:
uniqueTimezones = df.timezone.unique()
uniqueTimezones = uniqueTimezones[pd.notnull(df.timezone.unique())]
for uniqueTimezone in uniqueTimezones:
alias = tzAlias.loc[tzAlias.tz.str.endswith(uniqueTimezone),
["alias"]].values
if len(alias) == 1:
df.loc[df.timezone == uniqueTimezone, ["timezone"]] = alias
return df
def largeTimezoneOffsetCorrection(df):
while ((df.timezoneOffset > 840).sum() > 0):
df.loc[df.timezoneOffset > 840, ["conversionOffset"]] = \
df.loc[df.timezoneOffset > 840, ["conversionOffset"]] - \
(1440 * 60 * 1000)
df.loc[df.timezoneOffset > 840, ["timezoneOffset"]] = \
df.loc[df.timezoneOffset > 840, ["timezoneOffset"]] - 1440
while ((df.timezoneOffset < -720).sum() > 0):
df.loc[df.timezoneOffset < -720, ["conversionOffset"]] = \
df.loc[df.timezoneOffset < -720, ["conversionOffset"]] + \
(1440 * 60 * 1000)
df.loc[df.timezoneOffset < -720, ["timezoneOffset"]] = \
df.loc[df.timezoneOffset < -720, ["timezoneOffset"]] + 1440
return df
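# Worked example (hedged): a timezoneOffset of 1260 minutes (UTC+21, outside
# the valid [-720, 840] range) becomes 1260 - 1440 = -180 minutes (UTC-3),
# and the removed day is booked into conversionOffset as 1440 * 60 * 1000 ms
# so the overall time conversion is preserved.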
def createContiguousDaySeries(df):
firstDay = df.date.min()
lastDay = df.date.max()
rng = pd.date_range(firstDay, lastDay).date
contiguousDaySeries = \
pd.DataFrame(rng, columns=["date"]).sort_values(
"date", ascending=False).reset_index(drop=True)
return contiguousDaySeries
def getAndPreprocessUploadRecords(df):
# first make sure deviceTag is in string format
df["deviceTags"] = df.deviceTags.astype(str)
# filter by type upload
ud = df[df.type == "upload"].copy()
# define a device type (e.g., pump, cgm, or healthkit)
ud["deviceType"] = np.nan
ud.loc[ud.deviceTags.str.contains("pump"), ["deviceType"]] = "pump"
# this is for non-healthkit cgm records only
ud.loc[((ud.deviceTags.str.contains("cgm")) &
(ud.timeProcessing != "none")), ["deviceType"]] = "cgm"
ud.loc[((ud.deviceTags.str.contains("cgm")) &
(ud.timeProcessing == "none")), ["deviceType"]] = "healthkit"
return ud
def getAndPreprocessNonDexApiCgmRecords(df):
# non-healthkit cgm and exclude dexcom-api data
if "payload" in df:
# convert payloads to strings
df["isDexcomAPI"] = df.payload.astype(str).str.contains("systemTime")
cd = df[(df.type == "cbg") &
(df.timezoneOffset.notnull()) &
(~df.isDexcomAPI.fillna(False))].copy()
else:
cd = df[(df.type == "cbg") & (df.timezoneOffset.notnull())]
return cd
def getTimezoneOffset(currentDate, currentTimezone):
tz = timezone(currentTimezone)
# here we add 1 day to the current date to account for changes to/from DST
tzoNum = int(tz.localize(currentDate + timedelta(days=1)).strftime("%z"))
    # decompose the "%z" string (e.g., "-0430") into signed whole minutes.
    # note: split on abs() so that negative fractional-hour offsets
    # (e.g., UTC-03:30) decompose correctly
    tzoSign = np.sign(tzoNum) if tzoNum != 0 else 1
    tzoHours = abs(tzoNum) // 100
    tzoMinutes = abs(tzoNum) % 100
    tzo = int(tzoSign * ((tzoHours * 60) + tzoMinutes))
return tzo
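# Worked sketch of the decomposition above (hypothetical offset): for UTC-04:30
# pytz yields "-0430", so tzoNum = -430 and tzo = -1 * (4 * 60 + 30) = -270
# minutes. A floor-based split (floor(-4.3) = -5) would mis-handle such
# negative fractional-hour zones, which is why abs() is used.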
def getTzoForDateTime(currentDateTime, currentTimezone):
tz = timezone(currentTimezone)
tzoNum = int(tz.localize(pd.to_datetime(currentDateTime)).strftime("%z"))
    tzoSign = np.sign(tzoNum) if tzoNum != 0 else 1
    tzoHours = abs(tzoNum) // 100
    tzoMinutes = abs(tzoNum) % 100
    tzo = int(tzoSign * ((tzoHours * 60) + tzoMinutes))
return tzo
def isDSTChangeDay(currentDate, currentTimezone):
tzoCurrentDay = getTimezoneOffset(pd.to_datetime(currentDate),
currentTimezone)
tzoPreviousDay = getTimezoneOffset(pd.to_datetime(currentDate) +
timedelta(days=-1), currentTimezone)
return (tzoCurrentDay != tzoPreviousDay)
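# Usage sketch (assumes the pytz "US/Pacific" zone): 2018-03-11 was the US
# spring-forward date, so isDSTChangeDay("2018-03-11", "US/Pacific") should
# return True, while a mid-winter date such as "2018-01-15" returns False.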
def addAnnotation(df, idx, annotationMessage):
if pd.notnull(df.loc[idx, "est.annotations"]):
df.loc[idx, ["est.annotations"]] = df.loc[idx, "est.annotations"] + \
", " + annotationMessage
else:
df.loc[idx, ["est.annotations"]] = annotationMessage
return df
def addDeviceDaySeries(df, dfContDays, deviceTypeName):
if len(df) > 0:
dfDayGroups = df.groupby("date")
dfDaySeries = pd.DataFrame(dfDayGroups.timezoneOffset.median())
if "upload" in deviceTypeName:
if "timezone" in df:
if dfDayGroups.timezone.count().values[0] > 0:
dfDaySeries["timezone"] = \
dfDayGroups.timezone.describe()["top"]
# get the timezone offset for the timezone
for i in dfDaySeries.index:
if pd.notnull(dfDaySeries.loc[i, "timezone"]):
tzo = getTimezoneOffset(
pd.to_datetime(i),
dfDaySeries.loc[i, "timezone"])
dfDaySeries.loc[i, ["timezoneOffset"]] = tzo
dfDaySeries["timeProcessing"] = \
dfDayGroups.timeProcessing.describe()["top"]
dfDaySeries = dfDaySeries.add_prefix(deviceTypeName + "."). \
rename(columns={deviceTypeName + ".date": "date"})
dfContDays = pd.merge(dfContDays, dfDaySeries.reset_index(),
on="date", how="left")
else:
dfContDays[deviceTypeName + ".timezoneOffset"] = np.nan
return dfContDays
def imputeUploadRecords(df, contDays, deviceTypeName):
daySeries = \
addDeviceDaySeries(df, contDays, deviceTypeName)
if ((len(df) > 0) & (deviceTypeName + ".timezone" in daySeries)):
for i in daySeries.index[1:]:
if pd.isnull(daySeries[deviceTypeName + ".timezone"][i]):
daySeries.loc[i, [deviceTypeName + ".timezone"]] = \
daySeries.loc[i-1, deviceTypeName + ".timezone"]
if pd.notnull(daySeries[deviceTypeName + ".timezone"][i]):
tz = daySeries.loc[i, deviceTypeName + ".timezone"]
tzo = \
getTimezoneOffset(pd.to_datetime(daySeries.loc[i, "date"]),
tz)
daySeries.loc[i, deviceTypeName + ".timezoneOffset"] = tzo
if pd.notnull(daySeries[deviceTypeName + ".timeProcessing"][i-1]):
daySeries.loc[i, deviceTypeName + ".timeProcessing"] = \
daySeries.loc[i-1, deviceTypeName + ".timeProcessing"]
else:
daySeries[deviceTypeName + ".timezone"] = np.nan
daySeries[deviceTypeName + ".timeProcessing"] = np.nan
return daySeries
def estimateTzAndTzoWithUploadRecords(cDF):
cDF["est.type"] = np.nan
cDF["est.gapSize"] = np.nan
cDF["est.timezoneOffset"] = cDF["upload.timezoneOffset"]
cDF["est.annotations"] = np.nan
if "upload.timezone" in cDF:
cDF.loc[cDF["upload.timezone"].notnull(), ["est.type"]] = "UPLOAD"
cDF["est.timezone"] = cDF["upload.timezone"]
cDF["est.timeProcessing"] = cDF["upload.timeProcessing"]
else:
cDF["est.timezone"] = np.nan
cDF["est.timeProcessing"] = np.nan
cDF.loc[((cDF["est.timezoneOffset"] !=
cDF["home.imputed.timezoneOffset"]) &
(pd.notnull(cDF["est.timezoneOffset"]))),
"est.annotations"] = "travel"
return cDF
def estimateTzAndTzoWithDeviceRecords(cDF):
# 2A. use the TZO of the pump or cgm device if it exists on a given day. In
# addition, compare the TZO to one of the imputed day series (i.e., the
# upload and home series to see if the TZ can be inferred)
for deviceType in ["pump", "cgm"]:
# find the indices of days where a TZO estimate has not been made AND
# where the device (e.g., pump or cgm) TZO has data
sIndices = cDF[((cDF["est.timezoneOffset"].isnull()) &
(cDF[deviceType + ".timezoneOffset"].notnull()))].index
# compare the device TZO to the imputed series to infer time zone
cDF = compareDeviceTzoToImputedSeries(cDF, sIndices, deviceType)
# 2B. if the TZ cannot be inferred with 2A, then see if the TZ can be
# inferred from the previous day's TZO. If the device TZO is equal to the
# previous day's TZO, AND if the previous day has a TZ estimate, use the
# previous day's TZ estimate for the current day's TZ estimate
for deviceType in ["pump", "cgm"]:
sIndices = cDF[((cDF["est.timezoneOffset"].isnull()) &
(cDF[deviceType + ".timezoneOffset"].notnull()))].index
cDF = compareDeviceTzoToPrevDayTzo(cDF, sIndices, deviceType)
# 2C. after 2A and 2B, check the DEVICE estimates to make sure that the
# pump and cgm tzo do not differ by more than 60 minutes. If they differ
    # by more than 60 minutes, then mark the estimate as UNCERTAIN. Also, we
# allow the estimates to be off by 60 minutes as there are a lot of cases
# where the devices are off because the user changes the time for DST,
# at different times
sIndices = cDF[((cDF["est.type"] == "DEVICE") &
(cDF["pump.timezoneOffset"].notnull()) &
(cDF["cgm.timezoneOffset"].notnull()) &
(cDF["pump.timezoneOffset"] != cDF["cgm.timezoneOffset"])
)].index
tzoDiffGT60 = abs(cDF.loc[sIndices, "cgm.timezoneOffset"] -
cDF.loc[sIndices, "pump.timezoneOffset"]) > 60
idx = tzoDiffGT60.index[tzoDiffGT60]
cDF.loc[idx, ["est.type"]] = "UNCERTAIN"
for i in idx:
cDF = addAnnotation(cDF, i, "pump-cgm-tzo-mismatch")
return cDF
def addHomeTimezone(df, contDays):
if "timezone" in df:
homeTimezone = df["timezone"].describe()["top"]
tzo = contDays.date.apply(
lambda x: getTimezoneOffset(pd.to_datetime(x), homeTimezone))
contDays["home.imputed.timezoneOffset"] = tzo
contDays["home.imputed.timezone"] = homeTimezone
else:
contDays["home.imputed.timezoneOffset"] = np.nan
contDays["home.imputed.timezone"] = np.nan
contDays["home.imputed.timeProcessing"] = np.nan
return contDays
def getRangeOfTZOsForTimezone(tz):
minMaxTzo = [getTimezoneOffset(pd.to_datetime("1/1/2017"), tz),
getTimezoneOffset(pd.to_datetime("5/1/2017"), tz)]
rangeOfTzo = np.arange(int(min(minMaxTzo)), int(max(minMaxTzo))+1, 15)
return rangeOfTzo
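# Worked sketch (assumes the pytz "US/Pacific" zone, UTC-08:00 standard and
# UTC-07:00 daylight time): the January/May probes give [-480, -420], so the
# function returns np.arange(-480, -419, 15) -> [-480, -465, -450, -435, -420].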
def tzoRangeWithComparisonTz(df, i, comparisonTz):
    # if we have a previous timezone estimate, then calculate the range of
# timezone offset values for that time zone
if pd.notnull(comparisonTz):
rangeTzos = getRangeOfTZOsForTimezone(comparisonTz)
else:
comparisonTz = np.nan
rangeTzos = np.array([])
return rangeTzos
def tzAndTzoRangePreviousDay(df, i):
    # if we have a previous timezone estimate, then calculate the range of
# timezone offset values for that time zone
comparisonTz = df.loc[i-1, "est.timezone"]
rangeTzos = tzoRangeWithComparisonTz(df, i, comparisonTz)
return comparisonTz, rangeTzos
def tzAndTzoRangeWithHomeTz(df, i):
    # if we have a previous timezone estimate, then calculate the range of
# timezone offset values for that time zone
comparisonTz = df.loc[i, "home.imputed.timezone"]
rangeTzos = tzoRangeWithComparisonTz(df, i, comparisonTz)
return comparisonTz, rangeTzos
def assignTzoFromImputedSeries(df, i, imputedSeries):
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[i, imputedSeries + ".timezoneOffset"]
df.loc[i, ["est.timezone"]] = \
df.loc[i, imputedSeries + ".timezone"]
df.loc[i, ["est.timeProcessing"]] = \
df.loc[i, imputedSeries + ".timeProcessing"]
return df
def compareDeviceTzoToImputedSeries(df, sIdx, device):
for i in sIdx:
        # if the device tzo = imputed tzo, then choose the imputed tz and tzo
# note, dst is accounted for in the imputed tzo
for imputedSeries in ["pump.upload.imputed", "cgm.upload.imputed",
"healthkit.upload.imputed", "home.imputed"]:
# if the estimate has not already been made
if pd.isnull(df.loc[i, "est.timezone"]):
if df.loc[i, device + ".timezoneOffset"] == \
df.loc[i, imputedSeries + ".timezoneOffset"]:
assignTzoFromImputedSeries(df, i, imputedSeries)
df = addAnnotation(df, i,
"tz-inferred-from-" + imputedSeries)
# if the imputed series has a timezone estimate, then see if
# the current day is a dst change day
elif (pd.notnull(df.loc[i, imputedSeries + ".timezone"])):
imputedTimezone = df.loc[i, imputedSeries + ".timezone"]
if isDSTChangeDay(df.loc[i, "date"], imputedTimezone):
dstRange = getRangeOfTZOsForTimezone(imputedTimezone)
if ((df.loc[i, device + ".timezoneOffset"] in dstRange)
& (df.loc[i, imputedSeries + ".timezoneOffset"] in dstRange)):
assignTzoFromImputedSeries(df, i, imputedSeries)
df = addAnnotation(df, i, "dst-change-day")
df = addAnnotation(
df, i, "tz-inferred-from-" + imputedSeries)
return df
def assignTzoFromPreviousDay(df, i, previousDayTz):
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezone"]] = previousDayTz
df.loc[i, ["est.timezoneOffset"]] = \
getTimezoneOffset(pd.to_datetime(df.loc[i, "date"]), previousDayTz)
df.loc[i, ["est.timeProcessing"]] = df.loc[i-1, "est.timeProcessing"]
df = addAnnotation(df, i, "tz-inferred-from-prev-day")
return df
def assignTzoFromDeviceTzo(df, i, device):
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[i, device + ".timezoneOffset"]
df.loc[i, ["est.timeProcessing"]] = \
df.loc[i, device + ".upload.imputed.timeProcessing"]
df = addAnnotation(df, i, "likely-travel")
df = addAnnotation(df, i, "tzo-from-" + device)
return df
def compareDeviceTzoToPrevDayTzo(df, sIdx, device):
for i in sIdx[sIdx > 0]:
# first see if the previous record has a tzo
if (pd.notnull(df.loc[i-1, "est.timezoneOffset"])):
previousDayTz, dstRange = tzAndTzoRangePreviousDay(df, i)
timeDiff = abs((df.loc[i, device + ".timezoneOffset"]) -
df.loc[i-1, "est.timezoneOffset"])
# next see if the previous record has a tz
if (pd.notnull(df.loc[i-1, "est.timezone"])):
if timeDiff == 0:
assignTzoFromPreviousDay(df, i, previousDayTz)
# see if the previous day's tzo and device tzo are within the
# dst range (as that is a common problem with this data)
elif ((df.loc[i, device + ".timezoneOffset"] in dstRange)
& (df.loc[i-1, "est.timezoneOffset"] in dstRange)):
# then see if it is DST change day
if isDSTChangeDay(df.loc[i, "date"], previousDayTz):
df = addAnnotation(df, i, "dst-change-day")
assignTzoFromPreviousDay(df, i, previousDayTz)
# if it is not DST change day, then mark this as uncertain
else:
# also, check to see if the difference between device.
# tzo and prev.tzo is less than the expected dst
# difference. There is a known issue where the BtUTC
# procedure puts clock drift into the device.tzo,
# and as a result the tzo can be off by 15, 30,
# or 45 minutes.
if (((df.loc[i, device + ".timezoneOffset"] ==
min(dstRange)) |
(df.loc[i, device + ".timezoneOffset"] ==
max(dstRange))) &
((df.loc[i-1, "est.timezoneOffset"] ==
min(dstRange)) |
(df.loc[i-1, "est.timezoneOffset"] ==
max(dstRange)))):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i,
"likely-dst-error-OR-travel")
else:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i,
"likely-15-min-dst-error")
# next see if time difference between device.tzo and prev.tzo
# is off by 720 minutes, which is indicative of a common
# user AM/PM error
elif timeDiff == 720:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-AM-PM-error")
# if it doesn't fall into any of these cases, then the
# tzo difference is likely due to travel
else:
df = assignTzoFromDeviceTzo(df, i, device)
elif timeDiff == 0:
df = assignTzoFromDeviceTzo(df, i, device)
        # if there is no previous record to compare with, check for dst errors,
# and if there are no errors, it is likely a travel day
else:
comparisonTz, dstRange = tzAndTzoRangeWithHomeTz(df, i)
timeDiff = abs((df.loc[i, device + ".timezoneOffset"]) -
df.loc[i, "home.imputed.timezoneOffset"])
if ((df.loc[i, device + ".timezoneOffset"] in dstRange)
& (df.loc[i, "home.imputed.timezoneOffset"] in dstRange)):
# see if it is DST change day
if isDSTChangeDay(df.loc[i, "date"], comparisonTz):
df = addAnnotation(df, i, "dst-change-day")
df.loc[i, ["est.type"]] = "DEVICE"
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[i, device + ".timezoneOffset"]
df.loc[i, ["est.timezone"]] = \
df.loc[i, "home.imputed.timezone"]
df.loc[i, ["est.timeProcessing"]] = \
df.loc[i, device + ".upload.imputed.timeProcessing"]
# if it is not DST change day, then mark this as uncertain
else:
# also, check to see if the difference between device.
# tzo and prev.tzo is less than the expected dst
# difference. There is a known issue where the BtUTC
# procedure puts clock drift into the device.tzo,
# and as a result the tzo can be off by 15, 30,
# or 45 minutes.
if (((df.loc[i, device + ".timezoneOffset"] ==
min(dstRange)) |
(df.loc[i, device + ".timezoneOffset"] ==
max(dstRange))) &
((df.loc[i, "home.imputed.timezoneOffset"] ==
min(dstRange)) |
(df.loc[i, "home.imputed.timezoneOffset"] ==
max(dstRange)))):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-dst-error-OR-travel")
else:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-15-min-dst-error")
# next see if time difference between device.tzo and prev.tzo
# is off by 720 minutes, which is indicative of a common
# user AM/PM error
elif timeDiff == 720:
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "likely-AM-PM-error")
# if it doesn't fall into any of these cases, then the
# tzo difference is likely due to travel
else:
df = assignTzoFromDeviceTzo(df, i, device)
return df
def getImputIndices(df, sIdx, hIdx):
lastDayIdx = len(df) - 1
currentDayIdx = sIdx.min()
tempList = pd.Series(hIdx) - currentDayIdx
prevDayIdx = currentDayIdx - 1
nextDayIdx = \
min(currentDayIdx + min(tempList[tempList >= 0]), lastDayIdx)
return currentDayIdx, prevDayIdx, nextDayIdx
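# Worked sketch (hypothetical indices, assuming the frame has at least 8 rows):
# with missing-TZO days sIdx = [4, 5, 6] and known-TZO days
# hIdx = [0, 1, 2, 3, 7], this returns currentDayIdx = 4, prevDayIdx = 3, and
# nextDayIdx = 4 + 3 = 7, i.e. the next later day that has a TZO.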
def imputeByTimezone(df, currentDay, prevDaywData, nextDaywData):
gapSize = (nextDaywData - currentDay)
if prevDaywData >= 0:
if df.loc[prevDaywData, "est.timezone"] == \
df.loc[nextDaywData, "est.timezone"]:
tz = df.loc[prevDaywData, "est.timezone"]
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.timezone"]] = tz
df.loc[i, ["est.timezoneOffset"]] = \
getTimezoneOffset(pd.to_datetime(df.loc[i, "date"]), tz)
df.loc[i, ["est.type"]] = "IMPUTE"
df = addAnnotation(df, i, "gap=" + str(gapSize))
df.loc[i, ["est.gapSize"]] = gapSize
# TODO: this logic should be updated to handle the edge case
# where the day before and after the gap have differing TZ, but
# the same TZO. In that case the gap should be marked as UNCERTAIN
elif df.loc[prevDaywData, "est.timezoneOffset"] == \
df.loc[nextDaywData, "est.timezoneOffset"]:
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.timezoneOffset"]] = \
df.loc[prevDaywData, "est.timezoneOffset"]
df.loc[i, ["est.type"]] = "IMPUTE"
df = addAnnotation(df, i, "gap=" + str(gapSize))
df.loc[i, ["est.gapSize"]] = gapSize
else:
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "unable-to-impute-tzo")
else:
for i in range(currentDay, nextDaywData):
df.loc[i, ["est.type"]] = "UNCERTAIN"
df = addAnnotation(df, i, "unable-to-impute-tzo")
return df
def imputeTzAndTzo(cDF):
sIndices = cDF[cDF["est.timezoneOffset"].isnull()].index
hasTzoIndices = cDF[cDF["est.timezoneOffset"].notnull()].index
if len(hasTzoIndices) > 0:
if len(sIndices) > 0:
lastDay = max(sIndices)
while ((sIndices.min() < max(hasTzoIndices)) &
(len(sIndices) > 0)):
currentDay, prevDayWithDay, nextDayIdx = \
getImputIndices(cDF, sIndices, hasTzoIndices)
cDF = imputeByTimezone(cDF, currentDay,
prevDayWithDay, nextDayIdx)
sIndices = cDF[((cDF["est.timezoneOffset"].isnull()) &
(~cDF["est.annotations"].str.contains(
"unable-to-impute-tzo").fillna(False)))].index
hasTzoIndices = cDF[cDF["est.timezoneOffset"].notnull()].index
# try to impute to the last day (earliest day) in the dataset
# if the last record has a timezone that is the home record, then
# impute using the home timezone
if len(sIndices) > 0:
currentDay = min(sIndices)
prevDayWithDay = currentDay - 1
gapSize = lastDay - currentDay
for i in range(currentDay, lastDay + 1):
if cDF.loc[prevDayWithDay, "est.timezoneOffset"] == \
cDF.loc[prevDayWithDay, "home.imputed.timezoneOffset"]:
cDF.loc[i, ["est.type"]] = "IMPUTE"
cDF.loc[i, ["est.timezoneOffset"]] = \
cDF.loc[i, "home.imputed.timezoneOffset"]
cDF.loc[i, ["est.timezone"]] = \
cDF.loc[i, "home.imputed.timezone"]
cDF = addAnnotation(cDF, i, "gap=" + str(gapSize))
cDF.loc[i, ["est.gapSize"]] = gapSize
else:
cDF.loc[i, ["est.type"]] = "UNCERTAIN"
cDF = addAnnotation(cDF, i, "unable-to-impute-tzo")
else:
cDF["est.type"] = "UNCERTAIN"
cDF["est.annotations"] = "unable-to-impute-tzo"
return cDF
def reorderColumns(cDF):
cDF = cDF[["pump.upload.imputed.timezoneOffset",
"pump.upload.imputed.timezone",
"pump.upload.imputed.timeProcessing",
"cgm.upload.imputed.timezoneOffset",
"cgm.upload.imputed.timezone",
"cgm.upload.imputed.timeProcessing",
"healthkit.upload.imputed.timezoneOffset",
"healthkit.upload.imputed.timezone",
"healthkit.upload.imputed.timeProcessing",
"home.imputed.timezoneOffset",
"home.imputed.timezone",
"home.imputed.timeProcessing",
"upload.timezoneOffset",
"upload.timezone",
"upload.timeProcessing",
"cgm.timezoneOffset",
"pump.timezoneOffset",
"date",
"est.type",
"est.timezoneOffset",
"est.timezone",
"est.timeProcessing",
"est.annotations",
"est.gapSize",
"est.version"]]
return cDF
def readXlsxData(xlsxPathAndFileName):
# load xlsx
    # note: ignore_index is handled by pd.concat below; it is not a
    # pd.read_excel argument
    df = pd.read_excel(xlsxPathAndFileName, sheet_name=None)
cdf = pd.concat(df.values(), ignore_index=True)
cdf = cdf.set_index('jsonRowIndex')
return cdf
def checkInputFile(inputFile):
if os.path.isfile(inputFile):
if os.stat(inputFile).st_size > 1000:
if inputFile[-4:] == "json":
inputData = pd.read_json(inputFile, orient="records")
fileName = os.path.split(inputFile)[-1][:-5]
elif inputFile[-4:] == "xlsx":
inputData = readXlsxData(inputFile)
fileName = os.path.split(inputFile)[-1][:-5]
elif inputFile[-3:] == "csv":
inputData = pd.read_csv(inputFile, low_memory=False)
fileName = os.path.split(inputFile)[-1][:-4]
else:
sys.exit("{0} is not a json, xlsx, or csv".format(inputFile))
else:
sys.exit("{0} contains too little data".format(inputFile))
else:
sys.exit("{0} does not exist".format(inputFile))
return inputData, fileName
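# Usage sketch (hypothetical file): checkInputFile("patient-data.csv") returns
# the loaded DataFrame plus the stem "patient-data"; missing, undersized, or
# unsupported files terminate the script via sys.exit.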
def getListOfDSTChangeDays(cDF):
# get a list of DST change days for the home time zone
dstChangeDays = \
cDF[abs(cDF["home.imputed.timezoneOffset"] -
cDF["home.imputed.timezoneOffset"].shift(-1)) > 0].date
return dstChangeDays
def correctEstimatesAroundDst(df, cDF):
# get a list of DST change days for the home time zone
dstChangeDays = getListOfDSTChangeDays(cDF)
# loop through the df within 2 days of a daylight savings time change
for d in dstChangeDays:
dstIndex = df[(df.date > (d + timedelta(days=-2))) &
(df.date < (d + timedelta(days=2)))].index
for dIdx in dstIndex:
if pd.notnull(df.loc[dIdx, "est.timezone"]):
tz = timezone(df.loc[dIdx, "est.timezone"])
tzRange = getRangeOfTZOsForTimezone(str(tz))
minHoursToLocal = min(tzRange)/60
tzoNum = int(tz.localize(df.loc[dIdx, "utcTime"] +
timedelta(hours=minHoursToLocal)).strftime("%z"))
            tzoHours = np.floor(tzoNum / 100)
# Library of routines for working with ASKAPsoft Self Calibration data, e.g. cont_gains_cal_SB10944_GASKAP_M344-11B_T0-0A.beam00.tab.
# These are mostly focussed around plotting the phase solutions and identifying jumps or failures in these solutions. Note that this module requires CASA support.
# The code is based on work by <NAME> and <NAME>.
# Author <NAME>
# Date 18 Oct 2020
import glob
import os
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from casacore.tables import *
import seaborn as sns
class SelfCalSolutions:
# phase is [time, beam, ant, pol]
def __init__(self):
"""Initialises parameters for reading a selfcal table
"""
self.nsol = None
self.nant = None
self.nbeam = 36
self.npol = None
# selfcal is an array in order [time, beam, ant, pol] of phase angle and amplitude value
self.selfcal = None
self.selfcal_times = None
self.selfcal_flags = None
self.field = None
def load(self, base_dir):
flist = glob.glob(base_dir + "/cont_gains*tab")
flist.sort()
filename = flist[0]
print (filename)
pos = filename.find("beam")
if pos == -1:
raise Exception("Can't find beam information in " + filename)
wildcard = filename[:pos+4] + "??" + filename[pos+6:]
flist = glob.glob(wildcard)
flist.sort()
first_beam = flist[0]
tb = table(first_beam, readonly=True, ack=False)
t_vals = tb.getcol("TIME")
sc_vals = tb.getcol("GAIN",1,1)
self.selfcal_times = t_vals[1:]
self.nsol = t_vals.shape[0] - 1
gain_shape = sc_vals.shape
self.npol = gain_shape[3]
self.nant = gain_shape[2]
tb.close()
        self.selfcal = np.zeros((self.nsol, 36, self.nant, self.npol), dtype=complex)
        self.selfcal_flags = np.zeros((self.nsol, 36, self.nant, self.npol), dtype=bool)
for beam in range(self.nbeam):
fname = wildcard.replace("??", "%02d" %(beam))
if os.path.exists(fname) == False:
continue
tb = table(fname, readonly=True, ack=False)
t_vals = tb.getcol("TIME", 1, self.nsol)
sc_vals = tb.getcol("GAIN", 1, self.nsol)
flag_vals = tb.getcol("GAIN_VALID", 1, self.nsol)
for index in range(self.nsol):
self.selfcal[index, beam] = sc_vals[index, 0, :, :]
self.selfcal_flags[index, beam] = np.invert(flag_vals[index, 0, :, :])
self.selfcal[np.where(self.selfcal_flags)] = np.nan
self.field = os.path.basename(base_dir)
print("Read %d solutions, %d antennas, %d beams, %d polarisations" %(self.nsol, self.nant, self.nbeam, self.npol))
def plotGains(self, ant, outFile = None):
fig = plt.figure(figsize=(14, 14))
amplitudes = np.abs(self.selfcal)
phases = np.angle(self.selfcal, deg=True)
times = np.array(range(self.nsol))
plt.subplot(1, 1, 1)
if self.nant == 36:
plt.title("ak%02d" %(ant+1), fontsize=8)
else:
plt.title("ant%02d" %(ant), fontsize=8)
for beam in range(self.nbeam):
plt.plot(times, phases[:,beam,ant,0], marker=None, label="beam %d" %(beam))
# plt.plot(times, phases[:,ant,beam,1], marker=None, color="red")
plt.ylim(-200.0, 200.0)
#rms = np.sqrt(np.mean(np.square(phases[:,beam,ant,0])))
#print ("ant ak{:02d} beam {:02d} rms={:.2f}".format(ant+1, beam, rms))
plt.legend()
plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile)
plt.close()
def _plot_ant_phase(sc, ant, outFile = None):
fig = plt.figure(figsize=(14, 14))
amplitudes = np.abs(sc.selfcal)
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
ax = plt.subplot(1, 1, 1)
if sc.nant == 36:
plt.title("ak%02d" %(ant+1), fontsize=8)
else:
plt.title("ant%02d" %(ant), fontsize=8)
low = np.nanpercentile(phases[:,:,ant,0], 2.5, axis=(1))
high = np.nanpercentile(phases[:,:,ant,0], 97.5, axis=(1))
colours = sns.color_palette()
ax.plot(np.nanmedian(phases[:,:,ant,0], axis=(1)), color=colours[0], label='median')
ax.fill_between(range(phases.shape[0]), low, high, color=colours[0], alpha= .2, label=r'95\% range')
ax.plot(np.nanmax(phases[:,:,ant,0], axis=(1)), color=colours[1], ls=':', label='maximum')
ax.plot(np.nanmin(phases[:,:,ant,0], axis=(1)), color=colours[1], ls=':', label='minimum')
plt.ylim(-200.0, 200.0)
plt.legend()
plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile)
plt.close()
def _plot_rms_map(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
#rms = np.sqrt(np.nanmean(np.square(phases[:,:,:,:]), axis=0))
rms = np.std(phases[:,:,:,:], axis=0)
print (np.nanmin(rms), np.nanmedian(rms), np.nanmax(rms))
ant_list = ['ak{:02}'.format(i+1) for i in range(36)]
beam_list = ['b{:02}'.format(i) for i in range(36)]
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
for i, ax in enumerate(axs):
sns.heatmap(rms[:,:,i].transpose(), ax=ax, cmap='GnBu', square=True, xticklabels=beam_list, yticklabels=ant_list,
vmin=0, vmax=40, linewidths=.5, cbar_kws={"shrink": .9, "label": 'Phase Standard Deviation (deg)'})
ax.set_title('Self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Beam')
axs[0].set_ylabel(r'Antenna')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_summary_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
colours = sns.color_palette()
for i, ax in enumerate(axs):
low = np.nanpercentile(phases[:,:,:,i], 2.5, axis=(1,2))
high = np.nanpercentile(phases[:,:,:,i], 97.5, axis=(1,2))
low_ant = np.nanmin(np.nanpercentile(phases[:,:,:,i], 2.5, axis=(1)), axis=1)
high_ant = np.nanmax(np.nanpercentile(phases[:,:,:,i], 97.5, axis=(1)), axis=1)
low_med = np.nanmin(np.nanmedian(phases[:,:,:,i], axis=(1)), axis=1)
high_med = np.nanmax(np.nanmedian(phases[:,:,:,i], axis=(1)), axis=1)
print (low.shape, low_ant.shape)
ax.fill_between(range(phases.shape[0]), low_ant, high_ant, color=colours[2], alpha= .2, label="95 percentile range")
ax.plot(np.nanmedian(phases[:,:,:,i], axis=(1,2)), color=colours[0], label="median")
ax.fill_between(range(phases.shape[0]), low_med, high_med, color=colours[0], alpha= .4, label="median range")
ax.plot(np.nanmax(phases[:,:,:,i], axis=(1,2)), color=colours[1], ls=':', alpha=.6, label="maximum")
ax.plot(np.nanmin(phases[:,:,:,i], axis=(1,2)), color=colours[1], ls=':', alpha=.6, label="minimum")
ax.set_title('Self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Time (Integration number)')
ax.set_ylim(-200.0, 200.0)
ax.legend()
axs[0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_median_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
colours = sns.color_palette()
for i, ax in enumerate(axs):
means = np.nanmedian(phases[:,:,:,i], axis=1)
for ant in range(36):
if ant > 30:
ax.plot(means[:,ant], label="ak%02d" %(ant+1), lw=2, zorder=2)
else:
ax.plot(means[:,ant], color='grey', lw=1, zorder=1)
ax.set_title('Median self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Time (Integration number)')
axs[0].legend()
axs[0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_ant_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
colours = sns.color_palette()
for i, ax in enumerate(axs):
means = np.nanmedian(phases[:,:,:,i], axis=1)
for ant in range(36):
if ant > 30:
ax.plot(means[:,ant], label="ak%02d" %(ant+1), lw=2, zorder=2)
else:
ax.plot(means[:,ant], color='grey', lw=1, zorder=1)
ax.set_title('Median self-cal phase for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Time (Integration number)')
axs[0].legend()
axs[0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def _plot_all_phases(sc, field, outFile = None):
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
sns.set()
fig, axs = plt.subplots(6, 12, figsize=(40,16))
pols = ['XX', 'YY']
colours = sns.color_palette()
for i, pol in enumerate(pols):
for ant in range(36):
ax = axs[ant // 6, i*6+ant%6]
for beam in range(sc.nbeam):
ax.plot(times, phases[:,beam,ant,i], marker=None, label="beam %d" %(beam))
ax.set_ylim(-200.0, 200.0)
            ax.set_title('Phases for ak%02d pol %s' % (ant+1, pol))
#ax.set_xlabel(r'Time (Integration number)')
#axs[0].legend()
axs[0,0].set_ylabel(r'Phase (deg)')
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight', dpi=300)
plt.close()
def _plot_amp_rms_map(sc, field, outFile = None):
amplitudes = np.absolute(sc.selfcal)
times = np.array(range(sc.nsol))
rms = np.std(amplitudes[:,:,:,:], axis=0)
print (np.nanmin(rms), np.nanmedian(rms), np.nanmax(rms))
ant_list = ['ak{:02}'.format(i+1) for i in range(36)]
beam_list = ['b{:02}'.format(i) for i in range(36)]
sns.set()
fig, axs = plt.subplots(1, 2, figsize=(20,8))
pol = ['XX', 'YY']
for i, ax in enumerate(axs):
sns.heatmap(rms[:,:,i].transpose(), ax=ax, cmap='GnBu', square=True, xticklabels=beam_list, yticklabels=ant_list,
vmin=0, vmax=0.1, linewidths=.5, cbar_kws={"shrink": .9, "label": 'Bandpass Standard Deviation (Jy)'})
ax.set_title('Bandpass stability for %s pol %s' % (field, pol[i]))
ax.set_xlabel(r'Beam')
axs[0].set_ylabel(r'Antenna')
#plt.tight_layout()
if outFile == None:
plt.show()
else:
plt.savefig(outFile, bbox_inches='tight')
plt.close()
def prepare_self_cal_set(folder):
"""
Prepare a set of self cal solutions for analysis.
Parameters
----------
folder: path
Path to the folder containing the self cal solution files. Normally named after the field/interleave.
Returns
-------
The SelfCalSolutions object for use by other calls.
"""
sc = SelfCalSolutions()
sc.load(folder)
return sc
def plot_self_cal_set(sc, fig_folder):
"""
Produce plots for a set of self calibration solutions for a field.
Parameters
----------
sc: SelfCalSolutions
The loaded self cal solutions object for the field/interleave.
fig_folder: string
Path to the folder we should put any plots or reports in.
Returns
-------
The paths to the RMS map plot and the summary plot produced for this field.
"""
rms_map_plot = fig_folder + '/sc_heatmap_{}.png'.format(sc.field)
summary_plot = fig_folder + '/sc_summary_{}.png'.format(sc.field)
all_phases_plot = fig_folder + '/sc_phases_{}.png'.format(sc.field)
_plot_rms_map(sc, sc.field, rms_map_plot)
_plot_summary_phases(sc, sc.field, summary_plot)
_plot_all_phases(sc, sc.field, all_phases_plot)
return rms_map_plot, summary_plot, all_phases_plot
def calc_phase_stability(sc, phase_rms_max=40):
"""
Calculate summary statistics of the phase stability as recorded in the self-cal solution.
Parameters
----------
sc: SelfCalSolutions
The loaded self cal solutions object for the field/interleave.
phase_rms_max: double
The maximum allowed median rms before a beam or antenna is classified as bad.
Returns
-------
The number of bad beams and bad antennas.
"""
phases = np.angle(sc.selfcal, deg=True)
times = np.array(range(sc.nsol))
rms = np.std(phases[:,:,:,:], axis=0)
# phase is [time, beam, ant, pol]
bad_beams = []
bad_ant = []
for i in range(2): # polarisations XX and YY
bad_ant.append(np.median(rms[:,:,i], axis=0) >= phase_rms_max)
bad_beams.append(np.median(rms[:,:,i], axis=1) >= phase_rms_max)
bad_ant_either = bad_ant[0] | bad_ant[1]
bad_beam_either = bad_beams[0] | bad_beams[1]
print('ants', bad_ant_either)
print('beams', bad_beam_either)
    return np.sum(bad_beam_either), np.sum(bad_ant_either)
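# Usage sketch (hypothetical paths): run the full QA pass for one field.
#   sc = prepare_self_cal_set("/data/SB10944/M344-11B_T0-0A")
#   rms_plot, summary_plot, phases_plot = plot_self_cal_set(sc, "/data/figs")
#   n_bad_beams, n_bad_ants = calc_phase_stability(sc)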
# Copyright (c) 2020. <NAME>. hk2699 at caa dot columbia dot edu.
import matplotlib as mpl
import numpy as np
import numpy_groupies as npg
import statsmodels.api as sm
from matplotlib import pyplot as plt
from data_2d import consts, load_data
from lib.pylabyk import plt2, np2
def get_coefs(
dim, dif_other,
dur, ch, cond, t_RDK_dur,
correct_only=True
):
"""
:param dim:
:param dif_other:
:param dur: [tr]
:param ch: [tr, dim]
:param cond: [tr, dim]
:param t_RDK_dur:
:param correct_only:
:return: glmres.params, glmres.bse, glmres, glmmodel
"""
id_dif = np.empty_like(cond)
for dim1 in range(consts.N_DIM):
out = np.unique(np.abs(cond[:,dim1]),
return_inverse=True)
_, id_dif[:, dim1] = out
odim = consts.N_DIM - 1 - dim
incl = (
(t_RDK_dur == dur)
& (np.isin(id_dif[:, odim], dif_other))
)
if correct_only:
incl = (
incl
& (np.sign(ch[:, odim] - 0.5)
== np.sign(cond[:, odim]))
)
ch1 = ch[incl, dim]
coh1 = cond[incl, dim]
cohs, id_cohs = np.unique(coh1, return_inverse=True)
if np.issubdtype(ch1.dtype, np.floating):
# p_ch=1 is given
ch11 = np.stack([
npg.aggregate(id_cohs, ch1),
npg.aggregate(id_cohs, 1 - ch1)
], -1)
else:
ch11 = npg.aggregate(np.vstack((id_cohs, 1 - ch1)), 1)
glmmodel = sm.GLM(
ch11, sm.add_constant(cohs), family=sm.families.Binomial())
glmres = glmmodel.fit()
return glmres.params, glmres.bse, glmres, glmmodel
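# Minimal standalone sketch of the psychometric fit above (synthetic counts,
# not project data): each response row is [n_choice1, n_choice0] at one signed
# coherence, and the fitted params come back as (bias, slope).
def _demo_binomial_glm():
    cohs = np.array([-0.512, -0.128, 0.0, 0.128, 0.512])
    ch_counts = np.array([[2, 48], [15, 35], [25, 25], [35, 15], [48, 2]])
    glmmodel = sm.GLM(ch_counts, sm.add_constant(cohs),
                      family=sm.families.Binomial())
    glmres = glmmodel.fit()
    return glmres.params  # approximately (0, positive slope) for this data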
def get_coefs_mesh(cond, ch, t_RDK_dur,
dif_irrs=(2, (0, 1)),
correct_only=False
) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
:param cond:
:param ch:
:param t_RDK_dur:
:param dif_irrs:
:param correct_only:
:return: (coef, se_coef, glmres, glmmodel)
coef[(bias, slope), dim, dif, dur]
"""
dims = [0, 1]
t_RDK_durs, id_durs = np.unique(t_RDK_dur, return_inverse=True)
coef, se_coef, glmres, glmmodel = np2.meshfun(
lambda *args: get_coefs(
*args,
ch=ch, cond=cond, t_RDK_dur=t_RDK_dur,
correct_only=correct_only),
[dims, dif_irrs, t_RDK_durs],
n_out=4,
outshape_first=True
)
return coef, se_coef, glmres, glmmodel
def get_coefs_from_histogram(cond, p_cond_ch):
glmmodel = sm.GLM(p_cond_ch, sm.add_constant(cond),
family=sm.families.Binomial())
glmres = glmmodel.fit()
return glmres.params, glmres.bse, glmres, glmmodel
def get_coefs_mesh_from_histogram(
p_cond_dur_ch: np.ndarray,
ev_cond_dim: np.ndarray,
dif_irrs=((2,), (0, 1))
) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
:param p_cond_dur_ch:
:param ev_cond_dim: [cond, dim]
:param dif_irrs:
return: (coef, se_coef, glmres, glmmodel)
coef[(bias, slope), dim, dif, dur]
"""
n_dim = ev_cond_dim.shape[1]
n_dif = len(dif_irrs)
n_dur = p_cond_dur_ch.shape[1]
siz = [n_dim, n_dif, n_dur]
n_coef = 4
coef = np.zeros([n_coef] + siz) + np.nan
se_coef = np.zeros([n_coef] + siz) + np.nan
    glmress = np.empty(siz, dtype=object)
    glmmodels = np.empty(siz, dtype=object)
p_cond_dur_ch = p_cond_dur_ch.reshape([-1] + [n_dur]
+ [consts.N_CH] * 2)
for dim_rel in range(n_dim):
for idif, dif_irr in enumerate(dif_irrs):
for idur in range(n_dur):
dim_irr = consts.get_odim(dim_rel)
cond_irr = ev_cond_dim[:, dim_irr]
adcond_irr = np.unique(np.abs(cond_irr), return_inverse=True)[1]
incl = np.isin(adcond_irr, dif_irr)
ev_cond_dim1 = ev_cond_dim[incl]
reg = [ev_cond_dim1[:, dim_rel]]
reg += [
ev_cond_dim1[:, dim_irr],
]
if len(dif_irr) > 1:
# otherwise np.abs(cond_irr) would be constant
reg.append(np.abs(ev_cond_dim1[:, dim_irr]))
reg = np.stack(reg, -1)
reg = sm.add_constant(reg)
n_coef1 = reg.shape[1]
if dim_rel == 0:
p_cond_ch = p_cond_dur_ch[incl, idur, :, :].sum(-1)
else:
p_cond_ch = p_cond_dur_ch[incl, idur, :, :].sum(-2)
glmmodel = sm.GLM(np.flip(p_cond_ch, -1),
reg,
family=sm.families.Binomial())
glmres = glmmodel.fit()
coef[:n_coef1, dim_rel, idif, idur] = glmres.params
se_coef[:n_coef1, dim_rel, idif, idur] = glmres.bse
glmress[dim_rel, idif, idur] = glmres
glmmodels[dim_rel, idif, idur] = glmmodel
return coef, se_coef, glmress, glmmodels
def get_coefs_irr_ixn_from_histogram(
p_cond_dur_ch: np.ndarray,
ev_cond_dim: np.ndarray
) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
:param p_cond_dur_ch:
:param ev_cond_dim: [cond, dim]
:param dif_irrs:
return: (coef, se_coef, glmres, glmmodel)
coef[(bias, slope), dim, dif, dur]
"""
n_dim = ev_cond_dim.shape[1]
n_dur = p_cond_dur_ch.shape[1]
siz = [n_dim, n_dur]
n_coef = 6 # constant, rel, rel x abs(irr), rel x irr, abs(irr), irr,
    coef = np.zeros([n_coef] + siz) + np.nan
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 11:16:49 2016
@author: tsz
"""
from __future__ import division
import os
import numpy as np
import pycity_base.classes.timer
import pycity_base.classes.sun
import pycity_base.classes.weather
import pycity_base.classes.prices
import pycity_base.classes.environment
import pycity_base.classes.demand.occupancy
import pycity_base.classes.demand.electrical_demand as ED
import pycity_base.classes.demand.domestic_hot_water as DomesticHotWater
import pycity_base.classes.demand.space_heating as sh
import pycity_base.classes.demand.zone_parameters as zp
# Location: Denver (weather inputs from ASHRAE 140 verification)
location = (39.76, -104.86)
altitude = 1609 # m
time_zone = -7
timer = pycity_base.classes.timer.Timer(time_discretization=3600,
timesteps_horizon=8760,
timesteps_used_horizon=8760,
timesteps_total=8760)
prices = pycity_base.classes.prices.Prices()
# Define src path
ashrae_path = os.path.dirname(os.path.abspath(__file__))
weather_temp_path = os.path.join(ashrae_path, 'weather_temperature.csv')
weather_beam_path = os.path.join(ashrae_path, 'weather_beam.csv')
weather_diffuse_path = os.path.join(ashrae_path, 'weather_diffuse.csv')
weather = pycity_base.classes.weather.Weather(timer,
path_temperature=weather_temp_path,
path_direct_radiation=weather_beam_path,
path_diffuse_radiation=weather_diffuse_path,
time_discretization=3600,
delimiter="\t",
use_TRY=False,
location=location,
altitude=altitude,
time_zone=time_zone)
prices = pycity_base.classes.prices.Prices()
environment = pycity_base.classes.environment.Environment(timer, weather, prices)
# Occupancy and electrical demand
occupancy = pycity_base.classes.demand.occupancy.Occupancy(environment,
number_occupants=3)
energy_input = 3000
el_dem_stochastic = ED.ElectricalDemand(environment,
method=2,
annual_demand=energy_input,
total_nb_occupants=3,
randomize_appliances=True,
light_configuration=10,
occupancy=occupancy.occupancy,
do_normalization=True)
demand_electricity = el_dem_stochastic.loadcurve
# Domestic hot water demand
dhw_stochastical = DomesticHotWater.DomesticHotWater(environment,
t_flow=60,
thermal=True,
method=2,
supply_temperature=20,
occupancy=occupancy.occupancy)
demand_hot_water = dhw_stochastical.loadcurve
# Building model
# beta: slope angle, gamma: surface azimuth angle
# S, W, N, E, Roof
beta = [90, 90, 90, 90, 0]
gamma = [0, 90, 180, 270, 0]
albedo = 0.2
internalGains = 0.3 * demand_electricity
# Heated floor area
A_f = 150 # m^2
heightWalls = 3.0 # m
volume = A_f * heightWalls
# Material properties
# Opaque surfaces
solarAbsorptance = 0.7 # alpha
infraredEmittance = 0.9 # epsilon
# Index-Orientation: South, West, North, East, Roof, Floor
A_windows = np.array([7.5, 7.5, 7.5, 7.5, 0, 0])
A_walls_ext = np.array([42.25, 42.25, 42.25, 42.25, 99.75, 99.75])
A_walls = A_walls_ext - A_windows
A_walls_int = [375, 75, 75] #m²; [intWall, intCeiling, intFloor]
A_walls = np.append(A_walls_ext, A_walls_int)
"""
A method that computes the constraint violations, where it's considered a
violation if P(General|x) < P(Specific|x)
"""
from typing import Dict, List
from box_mlc.dataset_readers.hierarchy_readers.hierarchy_reader import (
HierarchyReader,
)
from torch.nn.parameter import Parameter
from allennlp.common import Registrable
import logging
import torch
import numpy as np
logger = logging.getLogger(__name__)
# TODO: Remove the parent class Module
# TODO: remove the extra useless parameter adjacency_matrix_param
class ConstraintViolation(torch.nn.Module,Registrable):
"""
Given a hierarchy in the form of an adjacency matrix or cooccurence
statistic in the adjacency matrix format, compute the average
constraint violation.
"""
def __init__(
self,
hierarchy_reader: HierarchyReader,
cooccurence_threshold: float = 1,
) -> None:
"""
Args:
hierarchy_reader: Creates the adjacency_matrix and the mask.
            cooccurence_threshold: If the adjacency matrix captures cooccurrence stats, the threshold determines
                if an edge exists between labels. Row->general. Column->Specific.
"""
super().__init__() # type:ignore
#self.adjacency_matrix_param = torch.nn.Parameter(hierarchy_reader.adjacency_matrix, requires_grad=False) # This is useless but present only so that we can load old models.
self.adjacency_matrix = (
hierarchy_reader.adjacency_matrix.detach().cpu().numpy()
)
self.threshold = cooccurence_threshold
def get_true_mask(self, true_labels: np.ndarray) -> np.ndarray:
true_mask = true_labels.copy()
true_mask[true_mask == 1] = -100000
true_mask[true_mask == 0] = 1
return true_mask
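    # Worked sketch: for a true_labels row [1, 0, 1] the mask row is
    # [-100000, 1, -100000], so true labels other than the edge pair (whose
    # mask entries are reset to 1 in __call__) sort to the end of the
    # descending probability ranking used there.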
def __call__(
self, positive_probabilities: torch.Tensor, true_labels: torch.Tensor
) -> Dict:
"""
true_labels: (examples, labels). True labels for the given example.
Should follow same label indexing as the adj. matrix.
positive_probabilities: (examples, labels). Predicted probabilities by the model.
"""
positive_probabilities = positive_probabilities.detach().cpu().numpy()
true_labels = true_labels.detach().cpu().numpy()
edges_idx = np.argwhere(self.adjacency_matrix >= self.threshold)
true_mask = self.get_true_mask(true_labels)
# logger.info(f"Processing {len(edges_idx)} edges")
avg_number_of_violations: List = []
number_of_violations: List = []
extent_of_violations: List = []
frequency: List = []
distances: List = []
no_examples_edges_count: int = 0
for edge in edges_idx:
ind = np.logical_and(
true_labels[:, edge[0]], true_labels[:, edge[1]]
) # examples where the edge is present
true_subset = true_labels.copy()[ind]
if true_subset.shape[0] > 0:
frequency.append(true_subset.shape[0])
true_mask_subset = true_mask.copy()[ind]
true_mask_subset[:, edge[0]] = 1
true_mask_subset[:, edge[1]] = 1
positive_subset = positive_probabilities.copy()[
ind
] # (#subset_ex, num_labels)
extent_of_violations.append(
np.mean(
positive_subset[:, edge[0]]
- positive_subset[:, edge[1]]
)
)
sorted_ind = np.argsort(
-1 * positive_subset * true_mask_subset, axis=1
)
distance_g_s = (
np.argwhere(sorted_ind == edge[0])[:, -1]
                    - np.argwhere(sorted_ind == edge[1])[:, -1]
                )
"""
Module for customising opensim segmented muscle points
"""
import os
import re
import math
import json
import shutil
import numpy as np
import copy
from gias3.musculoskeletal.bonemodels import bonemodels
from gias3.musculoskeletal import osim
from mapclientplugins.gait2392somsomusclestep.muscleVolumeCalculator import muscleVolumeCalculator
from numpy import pi
from scipy.interpolate import interp1d
SELF_DIR = os.path.split(__file__)[0]
DATA_DIR = os.path.join(SELF_DIR, 'data/node_numbers/')
TEMPLATE_OSIM_PATH = os.path.join(SELF_DIR, 'data', 'gait2392_simbody_wrap.osim')
VALID_SEGS = {'pelvis', 'femur-l', 'femur-r', 'tibia-l', 'tibia-r'}
OSIM_FILENAME = 'gait2392_simbody.osim'
VALID_UNITS = ('nm', 'um', 'mm', 'cm', 'm', 'km')
TIBFIB_SUBMESHES = ('tibia', 'fibula')
TIBFIB_SUBMESH_ELEMS = {'tibia': range(0, 46), 'fibula': range(46, 88), }
TIBFIB_BASISTYPES = {'tri10': 'simplex_L3_L3', 'quad44': 'quad_L3_L3'}
def dim_unit_scaling(in_unit, out_unit):
"""
Calculate the scaling factor to convert from the input unit (in_unit) to
the output unit (out_unit). in_unit and out_unit must be a string and one
of ['nm', 'um', 'mm', 'cm', 'm', 'km'].
inputs
======
in_unit : str
Input unit
out_unit :str
Output unit
returns
=======
scaling_factor : float
"""
unit_vals = {
'nm': 1e-9,
'um': 1e-6,
'mm': 1e-3,
'cm': 1e-2,
'm': 1.0,
'km': 1e3,
}
if in_unit not in unit_vals:
raise ValueError(
'Invalid input unit {}. Must be one of {}'.format(
in_unit, list(unit_vals.keys())
)
)
if out_unit not in unit_vals:
raise ValueError(
            'Invalid output unit {}. Must be one of {}'.format(
                out_unit, list(unit_vals.keys())
)
)
return unit_vals[in_unit] / unit_vals[out_unit]
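# Worked examples:
#   dim_unit_scaling('mm', 'm')  -> 0.001
#   dim_unit_scaling('m', 'mm')  -> 1000.0
#   dim_unit_scaling('cm', 'mm') -> 10.0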
def update_femur_opensim_acs(femur_model):
femur_model.acs.update(
*bonemodels.model_alignment.createFemurACSOpenSim(
femur_model.landmarks['femur-HC'],
femur_model.landmarks['femur-MEC'],
femur_model.landmarks['femur-LEC'],
side=femur_model.side
)
)
def update_tibiafibula_opensim_acs(tibiafibula_model):
tibiafibula_model.acs.update(
*bonemodels.model_alignment.createTibiaFibulaACSOpenSim(
tibiafibula_model.landmarks['tibiafibula-MM'],
tibiafibula_model.landmarks['tibiafibula-LM'],
tibiafibula_model.landmarks['tibiafibula-MC'],
tibiafibula_model.landmarks['tibiafibula-LC'],
side=tibiafibula_model.side
)
)
def split_tibia_fibula_gfs(tib_fib_gf):
tib = tib_fib_gf.makeGFFromElements(
'tibia',
TIBFIB_SUBMESH_ELEMS['tibia'],
TIBFIB_BASISTYPES,
)
fib = tib_fib_gf.makeGFFromElements(
'fibula',
TIBFIB_SUBMESH_ELEMS['fibula'],
TIBFIB_BASISTYPES,
)
return tib, fib
def local_osim_2_global(body, model):
# find the knee angle
knee = model.joints['knee_l']
kneeAngle = model.joints['knee_l'].coordSets['knee_angle_l'].defaultValue
knee_lTrans = np.zeros(3)
# get the spline values
trans1X = knee.getSimmSplineParams('translation1')[0]
trans1Y = knee.getSimmSplineParams('translation1')[1]
f = interp1d(trans1X, trans1Y, kind='cubic')
knee_lTrans[0] = f(kneeAngle)
trans2X = knee.getSimmSplineParams('translation2')[0]
trans2Y = knee.getSimmSplineParams('translation2')[1]
f2 = interp1d(trans2X, trans2Y, kind='cubic')
knee_lTrans[1] = f2(kneeAngle)
# find the knee angle
knee = model.joints['knee_r']
kneeAngle = model.joints['knee_r'].coordSets['knee_angle_r'].defaultValue
knee_rTrans = np.zeros(3)
# get the spline values
trans1X = knee.getSimmSplineParams('translation1')[0]
trans1Y = knee.getSimmSplineParams('translation1')[1]
f = interp1d(trans1X, trans1Y, kind='cubic')
knee_rTrans[0] = f(kneeAngle)
trans2X = knee.getSimmSplineParams('translation2')[0]
trans2Y = knee.getSimmSplineParams('translation2')[1]
f2 = interp1d(trans2X, trans2Y, kind='cubic')
knee_rTrans[1] = f2(kneeAngle)
trans = None
if body == 'pelvis':
trans = np.zeros(3)
elif body == 'femur_l':
trans = model.joints['hip_l'].locationInParent
elif body == 'femur_r':
trans = model.joints['hip_r'].locationInParent
elif body == 'tibia_l':
trans = (model.joints['hip_l'].locationInParent +
knee_lTrans)
elif body == 'tibia_r':
trans = (model.joints['hip_r'].locationInParent +
knee_rTrans)
elif body == 'talus_l':
trans = (model.joints['hip_l'].locationInParent +
knee_lTrans +
model.joints['ankle_l'].locationInParent)
elif body == 'talus_r':
trans = (model.joints['hip_r'].locationInParent +
knee_rTrans +
model.joints['ankle_r'].locationInParent)
elif body == 'calcn_l':
trans = (model.joints['hip_l'].locationInParent +
knee_lTrans +
model.joints['ankle_l'].locationInParent +
model.joints['subtalar_l'].locationInParent)
elif body == 'calcn_r':
trans = (model.joints['hip_r'].locationInParent +
knee_rTrans +
model.joints['ankle_r'].locationInParent +
model.joints['subtalar_r'].locationInParent)
elif body == 'toes_l':
trans = (model.joints['hip_l'].locationInParent +
knee_lTrans +
model.joints['ankle_l'].locationInParent +
model.joints['subtalar_l'].locationInParent +
model.joints['mtp_l'].locationInParent)
elif body == 'toes_r':
trans = (model.joints['hip_r'].locationInParent +
knee_rTrans +
model.joints['ankle_r'].locationInParent +
model.joints['subtalar_r'].locationInParent +
model.joints['mtp_r'].locationInParent)
return trans
class Gait2392MuscleCustomiser(object):
def __init__(self, config, ll=None, osimmodel=None, landmarks=None):
"""
Class for customising gait2392 muscle points using host-mesh fitting
inputs
======
config : dict
Dictionary of option. (work in progress) Example:
{
'osim_output_dir': '/path/to/output/model.osim',
'in_unit': 'mm',
'out_unit': 'm',
'write_osim_file': True,
'update_knee_splines': False,
'static_vas': False,
}
ll : LowerLimbAtlas instance
Model of lower limb bone geometry and pose
osimmodel : opensim.Model instance
The opensim model instance to customise
"""
self.config = config
self.ll = ll
self.trcdata = landmarks
self.gias_osimmodel = None
self._workflow_location = None
if osimmodel is not None:
self.set_osim_model(osimmodel)
self._unit_scaling = dim_unit_scaling(
self.config['in_unit'], self.config['out_unit']
)
def set_osim_model(self, model):
self.gias_osimmodel = osim.Model(model=model)
def cust_pelvis(self):
pelvis = self.ll.models['pelvis']
# load the pelvis muscle attachment node numbers
with open(DATA_DIR + 'pelvisNodeNumbers.txt') as infile:
pelvisData = json.load(infile)
pelvisAttachmentNodeNums = list(pelvisData.values())
pelvisMuscleNames = list(pelvisData.keys())
pelvisMuscleNames = [str(item) for item in pelvisMuscleNames]
# This method appears to be taking quite a while to complete (like 5
# minutes), is this expected? This wasn't being used in musclecusthfm.
        # the muscle attachments were selected on a 24x24 mesh
pelvisPoints, lhF = pelvis.gf.triangulate([24, 24])
# Align the discretised pelvis points and the muscle attachments to the
# opensims pelvis local coordinate system.
localPelvisPoints = pelvis.acs.map_local(pelvisPoints) / 1000
pelvisAttachments = localPelvisPoints[pelvisAttachmentNodeNums]
for i in range(len(pelvisMuscleNames)):
muscle = self.gias_osimmodel.muscles[str(pelvisMuscleNames[i])]
pathPoints = muscle.path_points
s = sorted(muscle.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPoints[s[0]].body.name == 'pelvis':
aSite = 0
elif pathPoints[s[-1]].body.name == 'pelvis':
aSite = -1
# update the location of the pathpoint
pp = pathPoints[s[aSite]]
pp.location = pelvisAttachments[i]
def cust_femur_l(self):
leftFemur = self.ll.models['femur-l']
# load in the femur muscle attachment node numbers
with open(DATA_DIR + 'leftFemurNodeNumbers.txt') as infile:
leftFemurData = json.load(infile)
leftFemurAttachmentNodeNums = list(leftFemurData.values())
leftFemurMuscleNames = list(leftFemurData.keys())
leftFemurMuscleNames = [str(item) for item in leftFemurMuscleNames]
# update the geometric field coordinate system to match opensims
update_femur_opensim_acs(leftFemur)
        # the muscle attachments were selected on a 24x24 mesh
leftFemurPoints, lhF = leftFemur.gf.triangulate([24, 24])
# align the discretised femur points and the muscle attachments to the
# opensims femur local coordinate system
localLeftFemurPoints = leftFemur.acs.map_local(leftFemurPoints) / 1000
leftFemurAttachments = localLeftFemurPoints[
leftFemurAttachmentNodeNums]
for i in range(len(leftFemurMuscleNames)):
muscleLeft = self.gias_osimmodel.muscles[
str(leftFemurMuscleNames[i])]
pathPointsLeft = muscleLeft.path_points
sL = sorted(muscleLeft.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsLeft[sL[0]].body.name == 'femur_l':
aSite = 0
elif pathPointsLeft[sL[-1]].body.name == 'femur_l':
aSite = -1
# update the location of the pathpoint
ppL = pathPointsLeft[sL[aSite]]
ppL.location = leftFemurAttachments[i]
def cust_femur_r(self):
rightFemur = self.ll.models['femur-r']
rightFemur.side = 'right'
with open(DATA_DIR + 'rightFemurNodeNumbers.txt') as infile:
rightFemurData = json.load(infile)
rightFemurAttachmentNodeNums = list(rightFemurData.values())
rightFemurMuscleNames = list(rightFemurData.keys())
rightFemurMuscleNames = [str(item) for item in rightFemurMuscleNames]
# update the geometric field coordinate system to match opensims
update_femur_opensim_acs(rightFemur)
rightFemurPoints, rhF = rightFemur.gf.triangulate([24, 24])
localRightFemurPoints = rightFemur.acs.map_local(
rightFemurPoints) / 1000
rightFemurAttachments = localRightFemurPoints[
rightFemurAttachmentNodeNums]
# update attachments
for i in range(len(rightFemurMuscleNames)):
muscleRight = self.gias_osimmodel.muscles[
str(rightFemurMuscleNames[i])]
pathPointsRight = muscleRight.path_points
sR = sorted(muscleRight.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsRight[sR[0]].body.name == 'femur_r':
aSite = 0
elif pathPointsRight[sR[-1]].body.name == 'femur_r':
aSite = -1
ppR = pathPointsRight[sR[aSite]]
ppR.location = rightFemurAttachments[i]
def cust_tibia_l(self):
# The tibia, patella and fibula all use the same fieldwork model to
# align with opensim
leftTibFib = self.ll.models['tibiafibula-l']
leftPatella = self.ll.models['patella-l']
update_tibiafibula_opensim_acs(leftTibFib)
leftTib, leftFib = split_tibia_fibula_gfs(leftTibFib.gf)
# load in the tibia muscle attachment node numbers
with open(DATA_DIR + 'leftTibiaNodeNumbers.txt') as infile:
leftTibiaData = json.load(infile)
leftTibiaAttachmentNodeNums = list(leftTibiaData.values())
leftTibiaMuscleNames = list(leftTibiaData.keys())
leftTibiaMuscleNames = [str(item) for item in leftTibiaMuscleNames]
# load in the fibula muscle attachment node numbers
with open(DATA_DIR + 'leftFibulaNodeNumbers.txt') as infile:
leftFibulaData = json.load(infile)
leftFibulaAttachmentNodeNums = list(leftFibulaData.values())
leftFibulaMuscleNames = list(leftFibulaData.keys())
leftFibulaMuscleNames = [str(item) for item in leftFibulaMuscleNames]
# load in the patella muscle attachment node numbers
with open(DATA_DIR + 'leftPatellaNodeNumbers.txt') as infile:
leftPatellaData = json.load(infile)
leftPatellaAttachmentNodeNums = list(leftPatellaData.values())
leftPatellaMuscleNames = list(leftPatellaData.keys())
leftPatellaMuscleNames = [str(item) for item in leftPatellaMuscleNames]
leftTibiaPoints, lhF = leftTib.triangulate([24, 24])
leftFibulaPoints, lhF = leftFib.triangulate([24, 24])
leftPatellaPoints, lhf = leftPatella.gf.triangulate([24, 24])
localLeftTibiaPoints = leftTibFib.acs.map_local(leftTibiaPoints) / 1000
leftTibiaAttachments = localLeftTibiaPoints[
leftTibiaAttachmentNodeNums]
localLeftFibulaPoints = leftTibFib.acs.map_local(
leftFibulaPoints) / 1000
leftFibulaAttachments = localLeftFibulaPoints[
leftFibulaAttachmentNodeNums]
localLeftPatellaPoints = leftTibFib.acs.map_local(
leftPatellaPoints) / 1000
leftPatellaAttachments = localLeftPatellaPoints[
leftPatellaAttachmentNodeNums]
# update the tibia attachments
for i in range(len(leftTibiaMuscleNames)):
muscleLeft = self.gias_osimmodel.muscles[
str(leftTibiaMuscleNames[i])]
pathPointsLeft = muscleLeft.path_points
sL = sorted(muscleLeft.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsLeft[sL[0]].body.name == 'tibia_l':
aSite = 0
elif pathPointsLeft[sL[-1]].body.name == 'tibia_l':
aSite = -1
ppL = pathPointsLeft[sL[aSite]]
ppL.location = leftTibiaAttachments[i]
# update the fibula attachments
for i in range(len(leftFibulaMuscleNames)):
muscleLeft = self.gias_osimmodel.muscles[
str(leftFibulaMuscleNames[i])]
pathPointsLeft = muscleLeft.path_points
sL = sorted(muscleLeft.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsLeft[sL[0]].body.name == 'tibia_l':
aSite = 0
elif pathPointsLeft[sL[-1]].body.name == 'tibia_l':
aSite = -1
ppL = pathPointsLeft[sL[aSite]]
ppL.location = leftFibulaAttachments[i]
# update the patella attachments
for i in range(len(leftPatellaMuscleNames)):
muscleLeft = self.gias_osimmodel.muscles[
str(leftPatellaMuscleNames[i])]
pathPointsLeft = muscleLeft.path_points
sL = sorted(muscleLeft.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsLeft[sL[0]].body.name == 'tibia_l':
aSite = 0
elif pathPointsLeft[sL[-1]].body.name == 'tibia_l':
aSite = -1
ppL = pathPointsLeft[sL[aSite]]
ppL.location = leftPatellaAttachments[i]
def cust_tibia_r(self):
rightTibFib = self.ll.models['tibiafibula-r']
rightPatella = self.ll.models['patella-r']
update_tibiafibula_opensim_acs(rightTibFib)
rightTib, rightFib = split_tibia_fibula_gfs(rightTibFib.gf)
# load in the tibia attachment node numbers
with open(DATA_DIR + 'rightTibiaNodeNumbers.txt') as infile:
rightTibiaData = json.load(infile)
rightTibiaAttachmentNodeNums = list(rightTibiaData.values())
rightTibiaMuscleNames = list(rightTibiaData.keys())
rightTibiaMuscleNames = [str(item) for item in rightTibiaMuscleNames]
# load in the fibula attachment node numbers
with open(DATA_DIR + 'rightFibulaNodeNumbers.txt') as infile:
rightFibulaData = json.load(infile)
rightFibulaAttachmentNodeNums = list(rightFibulaData.values())
rightFibulaMuscleNames = list(rightFibulaData.keys())
rightFibulaMuscleNames = [str(item) for item in rightFibulaMuscleNames]
# load in the patella attachment node numbers
with open(DATA_DIR + 'rightPatellaNodeNumbers.txt') as infile:
rightPatellaData = json.load(infile)
rightPatellaAttachmentNodeNums = list(rightPatellaData.values())
rightPatellaMuscleNames = list(rightPatellaData.keys())
rightPatellaMuscleNames = [
str(item) for item in rightPatellaMuscleNames]
rightTibiaPoints, lhF = rightTib.triangulate([24, 24])
rightFibulaPoints, lhF = rightFib.triangulate([24, 24])
rightPatellaPoints, lhf = rightPatella.gf.triangulate([24, 24])
localRightTibiaPoints = rightTibFib.acs.map_local(
rightTibiaPoints) / 1000
rightTibiaAttachments = localRightTibiaPoints[
rightTibiaAttachmentNodeNums]
localRightFibulaPoints = rightTibFib.acs.map_local(
rightFibulaPoints) / 1000
rightFibulaAttachments = localRightFibulaPoints[
rightFibulaAttachmentNodeNums]
localRightPatellaPoints = rightTibFib.acs.map_local(
rightPatellaPoints) / 1000
rightPatellaAttachments = localRightPatellaPoints[
rightPatellaAttachmentNodeNums]
for i in range(len(rightTibiaMuscleNames)):
muscleRight = self.gias_osimmodel.muscles[
str(rightTibiaMuscleNames[i])]
pathPointsRight = muscleRight.path_points
sR = sorted(muscleRight.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsRight[sR[0]].body.name == 'tibia_r':
aSite = 0
elif pathPointsRight[sR[-1]].body.name == 'tibia_r':
aSite = -1
ppR = pathPointsRight[sR[aSite]]
ppR.location = rightTibiaAttachments[i]
for i in range(len(rightFibulaMuscleNames)):
muscleRight = self.gias_osimmodel.muscles[
str(rightFibulaMuscleNames[i])]
pathPointsRight = muscleRight.path_points
sR = sorted(muscleRight.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsRight[sR[0]].body.name == 'tibia_r':
aSite = 0
elif pathPointsRight[sR[-1]].body.name == 'tibia_r':
aSite = -1
ppR = pathPointsRight[sR[aSite]]
ppR.location = rightFibulaAttachments[i]
for i in range(len(rightPatellaMuscleNames)):
muscleRight = self.gias_osimmodel.muscles[
str(rightPatellaMuscleNames[i])]
pathPointsRight = muscleRight.path_points
sR = sorted(muscleRight.path_points.keys())
aSite = None
# aSite will be 0 if attachment is an origin and -1 if insertion
if pathPointsRight[sR[0]].body.name == 'tibia_r':
aSite = 0
elif pathPointsRight[sR[-1]].body.name == 'tibia_r':
aSite = -1
ppR = pathPointsRight[sR[aSite]]
ppR.location = rightPatellaAttachments[i]
def set_workflow_location(self, location):
self._workflow_location = location
def write_cust_osim_model(self):
self.gias_osimmodel.save(
os.path.join(self._workflow_location,
self.config['osim_output_dir'], OSIM_FILENAME)
)
def customise(self):
# Note: a number of PathPoints that were scaled in the previous plugin
# are also being scaled here. Are both of these necessary?
self.cust_pelvis()
self.cust_femur_l()
self.cust_tibia_l()
self.cust_femur_r()
self.cust_tibia_r()
# What is being done in the following methods that wasn't in the
# previous plugin or one of the cust (^) methods? They seem to be
# updating the same values that were updated earlier.
self.update_hip_muscles()
self.update_knee_muscles()
self.update_foot_muscles()
self.update_wrap_points()
# The gait2392 default marker set was comprehensively updated in the
# previous plugin. Many of the markers being added here appear to be
# duplicates of gait2392 markers. If we need to add any additional
# markers to the Model we should use this method.
# self.update_marker_set()
if self.config['update_max_iso_forces']:
self.update_max_iso_forces()
# Currently, none of the OFL and TSL values are being re-calculated
# after updating the PathPoints. They have been scaled in the previous
# plugin but could be done more accurately here.
if self.config['write_osim_file']:
self.write_cust_osim_model()
self.move_mesh_files()
def move_mesh_files(self):
output_directory = os.path.join(self._workflow_location, self.config['osim_output_dir'])
source_dir = os.path.join(self._workflow_location, '../output/Geometry')
target_dir = os.path.join(output_directory, './Geometry')
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(source_dir, target_dir)
# This method assumes the current max iso force is in mm and multiplies it
# to get the value in cm. I'm not sure it should be doing this (or not like
# this at least). It should depend on the plugin configuration, right?
def update_max_iso_forces(self):
osimModel = self.gias_osimmodel
subjectHeight = float(self.config['subject_height'])
subjectMass = float(self.config['subject_mass'])
# calculate muscle volumes using Handsfield (2014)
osimAbbr, muscleVolume = muscleVolumeCalculator(
subjectHeight, subjectMass)
# load Opensim model muscle set
allMuscles = osimModel.get_muscles()
allMusclesNames = list(range(allMuscles.getSize()))
oldValue = np.zeros([allMuscles.getSize(), 1])
optimalFibreLength = np.zeros([allMuscles.getSize(), 1])
penAngleAtOptFibLength = np.zeros([allMuscles.getSize(), 1])
for i in range(allMuscles.getSize()):
allMusclesNames[i] = allMuscles.get(i).getName()
oldValue[i] = allMuscles.get(i).getMaxIsometricForce()
optimalFibreLength[i] = allMuscles.get(i).getOptimalFiberLength()
penAngleAtOptFibLength[i] = np.rad2deg(
allMuscles.get(i).getPennationAngleAtOptimalFiberLength())
# convert opt. fibre length from [m] to [cm] to match volume units
# [cm^3]
# Shouldn't this (and the volume units) depend on the plugin config?
optimalFibreLength *= 100
allMusclesNamesCut = list(range(allMuscles.getSize()))
for i in range(len(allMusclesNames)):
# delete trailing '_r' or '_l'
currMuscleName = allMusclesNames[i][0:-2]
# split the name from any digit in its name and only keep the first
# string.
currMuscleName = re.split(r'(\d+)', currMuscleName)
currMuscleName = currMuscleName[0]
# store in cell
allMusclesNamesCut[i] = currMuscleName
# calculate ratio of old max isometric forces for
# multiple-lines-of-action muscles.
newAbsVolume = np.zeros([allMuscles.getSize(), 1])
fracOfGroup = np.zeros([allMuscles.getSize(), 1])
for i in range(allMuscles.getSize()):
currMuscleName = allMusclesNamesCut[i]
currIndex = [
j for j, x in enumerate(osimAbbr) if x == currMuscleName]
# currIndex = osimAbbr.index(currMuscleName)
if currIndex:
currValue = muscleVolume[currIndex]
newAbsVolume[i] = currValue
            # The peroneus longus/brevis and the extensors (EDL, EHL) have to
            # be treated separately as they are represented as a combined
            # muscle group in Handsfield, 2014. The following method may not
            # be the best!
if currMuscleName == 'per_brev' or currMuscleName == 'per_long':
currMuscleNameIndex = np.array([0, 0])
tmpIndex = [j for j, x in enumerate(
allMusclesNamesCut) if x == 'per_brev']
currMuscleNameIndex[0] = tmpIndex[0]
tmpIndex = [j for j, x in enumerate(
allMusclesNamesCut) if x == 'per_long']
currMuscleNameIndex[1] = tmpIndex[0]
currIndex = [j for j, x in enumerate(osimAbbr) if x == 'per_']
currValue = muscleVolume[currIndex]
newAbsVolume[i] = currValue
elif currMuscleName == 'ext_dig' or currMuscleName == 'ext_hal':
currMuscleNameIndex = np.array([0, 0])
tmpIndex = [j for j, x in enumerate(
allMusclesNamesCut) if x == 'ext_dig']
currMuscleNameIndex[0] = tmpIndex[0]
tmpIndex = [j for j, x in enumerate(
allMusclesNamesCut) if x == 'ext_hal']
currMuscleNameIndex[1] = tmpIndex[0]
currIndex = [j for j, x in enumerate(osimAbbr) if x == 'ext_']
currValue = muscleVolume[currIndex]
newAbsVolume[i] = currValue
else:
# find all instances of each muscle
currMuscleNameIndex = [j for j, x in enumerate(
allMusclesNamesCut) if x == currMuscleName]
# only require half of the results as we only need muscles from
# one side
currMuscleNameIndex = currMuscleNameIndex[0:int(len(
currMuscleNameIndex) / 2)]
# find how much of the total muscle volume this muscle contributes
fracOfGroup[i] = oldValue[i] / sum(oldValue[currMuscleNameIndex])
# calculate new maximal isometric muscle forces
specificTension = 61 # N/cm^2 from Zajac 1989
newVolume = fracOfGroup * newAbsVolume
# maxIsoMuscleForce = specificTension * (newVolume/optimalFibreLength)
# * np.cos(math.degrees(penAngleAtOptFibLength))
# Update muscles of loaded model (in workspace only!), change model
# name and print new osim file.
maxIsoMuscleForce = np.zeros([allMuscles.getSize(), 1])
for i in range(allMuscles.getSize()):
maxIsoMuscleForce[i] = specificTension * (
newVolume[i] / optimalFibreLength[i]) * np.cos(
math.radians(penAngleAtOptFibLength[i]))
# only update, if new value is not zero. Else do not override the
# original value.
if maxIsoMuscleForce[i] != 0:
allMuscles.get(i).setMaxIsometricForce(maxIsoMuscleForce[i][0])
def update_hip_muscles(self):
muscleNames = ['glut_max1_l', 'glut_max2_l', 'glut_max3_l', 'peri_l',
'iliacus_l', 'psoas_l', 'glut_max1_r', 'glut_max2_r',
'glut_max3_r', 'peri_r', 'psoas_r', 'iliacus_r']
joint = 'hip'
body = 'pelvis'
# joint - the joint that the muscles cross (currently only works for
# muscles that cross a single joint)
# body - the body that the origins of the muscles are attached to
# this has only been tested for muscles that cross the hip
# load in the original model
mO = osim.Model(TEMPLATE_OSIM_PATH)
mO.init_system()
# for each muscle
for i in range(len(muscleNames)):
# display the pathpoints for both muscles
muscleO = mO.muscles[muscleNames[i]]
muscle = self.gias_osimmodel.muscles[muscleNames[i]]
side = muscle.name[-2:]
# find the transformation between the two bodies the muscles are
# attached to
transO = mO.joints[joint + side].locationInParent
trans = self.gias_osimmodel.joints[joint + side].locationInParent
pathPointsO = copy.copy(muscleO.path_points)
pathPoints = copy.copy(muscle.path_points)
for j in range(len(pathPointsO)):
if list(pathPointsO.values())[j].body.name == body:
list(pathPointsO.values())[j].location -= transO
list(pathPoints.values())[j].location -= trans
# ################################################## #
# ###############Transform Points################### #
# ################################################## #
# find the path point names for the origin and the insertion
sortedKeys = sorted(muscle.path_points.keys())
# the origin will be the first sorted key and the insertion last
orig = sortedKeys[0]
ins = sortedKeys[-1]
# find vector between origins and insertions
v1 = pathPoints[orig].location - pathPointsO[orig].location
v2 = pathPoints[ins].location - pathPointsO[ins].location
            # the new points are going to be found by translating the points
            # based on a weighting multiplied by these two vectors;
            # the weighting is how far along the muscle the point lies
            # find the total muscle length
segments = np.zeros([len(pathPointsO) - 1, 3])
lengths = np.zeros(len(pathPointsO) - 1)
for j in range(len(pathPointsO) - 1):
segments[j] = pathPointsO[muscle.name + '-P' + str(
j + 2)].location - pathPointsO[
muscle.name + '-P' + str(j + 1)].location
lengths[j] = np.linalg.norm(segments[j])
Tl = np.sum(lengths)
            # Define the weighting function: for each via point, calculate
            # the magnitude and direction of its translation vector
for j in range(len(pathPointsO) - 2):
# the second pathpoint will be the first via point
p = pathPointsO[muscle.name + '-P' + str(j + 2)].location
# find how far along the muscle the point is
dl = np.sum(lengths[:j + 1])
                # create the new point by adding a weighted vector
pNew = ((dl / Tl) * v2) + ((1 - dl / Tl) * v1) + p
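                # e.g. a via point halfway along the muscle (dl/Tl = 0.5)
                # shifts by the average of the origin shift v1 and the
                # insertion shift v2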
# update the opensim model
muscle.path_points[muscle.name + '-P' + str(
j + 2)].location = pNew
            # transform the points back to the main body local coordinate system
for j in range(len(pathPoints)):
if list(pathPoints.values())[j].body.name == body:
list(pathPoints.values())[j].location += trans
def update_knee_muscles(self):
muscleNames = ['bifemlh_l', 'semimem_l', 'semiten_l', 'sar_l', 'tfl_l',
'grac_l', 'rect_fem_l', 'bifemlh_r', 'semimem_r',
'semiten_r', 'sar_r', 'tfl_r', 'grac_r', 'rect_fem_r',
'bifemsh_l', 'vas_med_l', 'vas_int_l', 'vas_lat_l',
'bifemsh_r', 'vas_med_r', 'vas_int_r', 'vas_lat_r',
'med_gas_l', 'lat_gas_l', 'med_gas_r', 'lat_gas_r']
# This is being done multiple times. Should move outside this method.
# load in the original model
mO = osim.Model(TEMPLATE_OSIM_PATH)
mO.init_system()
for i in range(len(muscleNames)):
# display the pathpoints for both muscles
muscleO = mO.muscles[muscleNames[i]]
muscle = self.gias_osimmodel.muscles[muscleNames[i]]
pathPointsO = copy.copy(muscleO.path_points)
pathPoints = copy.copy(muscle.path_points)
for j in range(len(pathPointsO)):
list(pathPointsO.values())[j].location += local_osim_2_global(
list(pathPointsO.values())[j].body.name, mO)
list(pathPoints.values())[j].location += local_osim_2_global(
list(pathPoints.values())[j].body.name,
self.gias_osimmodel)
# find the path point names for the origin and the insertion
sortedKeys = sorted(muscle.path_points.keys())
# the origin will be the first sorted key and the insertion last
orig = sortedKeys[0]
ins = sortedKeys[-1]
# find vector between origins and insertions
v1 = pathPoints[orig].location - pathPointsO[orig].location
v2 = pathPoints[ins].location - pathPointsO[ins].location
            # the new points are going to be found by translating the points
            # based on a weighting multiplied by these two vectors;
            # the weighting is how far along the muscle the point lies
# find the total muscle length
segments = np.zeros([len(pathPointsO) - 1, 3])
lengths = np.zeros(len(pathPointsO) - 1)
for j in range(len(pathPointsO) - 1):
segments[j] = pathPointsO[muscle.name + '-P' + str(
j + 2)].location - pathPointsO[
muscle.name + '-P' + str(j + 1)].location
lengths[j] = np.linalg.norm(segments[j])
Tl = np.sum(lengths)
            # Define the weighting function: for each via point, calculate
            # the magnitude and direction of its translation vector
for j in range(len(pathPointsO) - 2):
# the second pathpoint will be the first via point
p = pathPointsO[muscle.name + '-P' + str(j + 2)].location
# find how far along the muscle the point is
dl = np.sum(lengths[:j + 1])
                # create the new point by adding a weighted vector
pNew = ((dl / Tl) * v2) + ((1 - dl / Tl) * v1) + p
# update the opensim model
muscle.path_points[muscle.name + '-P' + str(
j + 2)].location = pNew
            # transform the pelvis points back to the pelvis region
for j in range(len(pathPoints)):
list(pathPoints.values())[j].location -= local_osim_2_global(
list(pathPoints.values())[j].body.name,
self.gias_osimmodel)
def update_foot_muscles(self):
muscleNames = ['ext_dig_l', 'ext_hal_l', 'flex_dig_l', 'flex_hal_l',
'per_brev_l', 'per_long_l', 'per_tert_l', 'tib_ant_l',
'tib_post_l', 'ext_dig_r', 'ext_hal_r', 'flex_dig_r',
'flex_hal_r', 'per_brev_r', 'per_long_r', 'per_tert_r',
'tib_ant_r', 'tib_post_r']
# load in the original model
mO = osim.Model(TEMPLATE_OSIM_PATH)
mO.init_system()
for i in range(len(muscleNames)):
# get the pathPoints for the old and new muscle
muscleO = mO.muscles[muscleNames[i]]
muscle = self.gias_osimmodel.muscles[muscleNames[i]]
side = muscle.name[-1]
# find the transformation between the two bodies the muscles are
# attached to
transO = mO.joints['ankle_' + side].locationInParent + mO.joints[
'subtalar_' + side].locationInParent
trans = self.gias_osimmodel.joints['ankle_' + side]\
.locationInParent + self.gias_osimmodel.joints[
'subtalar_' + side].locationInParent
pathPointsO = copy.copy(muscleO.path_points)
pathPoints = copy.copy(muscle.path_points)
# ################################################## #
# ###############Transform Points################### #
# ################################################## #
# find the path point names for the origin and the insertion
sortedKeys = sorted(muscle.path_points.keys())
# the origin will be the first sorted key
orig = sortedKeys[0]
ins = None
# find the first point on the calcn
for j in sortedKeys:
if pathPoints[j].body.name == 'calcn_' + side:
ins = j
break
endPP = sortedKeys.index(ins)
for j in range(endPP + 1):
if pathPointsO[sortedKeys[j]].body.name == 'calcn_' + side:
pathPointsO[sortedKeys[j]].location += transO
pathPoints[sortedKeys[j]].location += trans
# find vector between origins and insertions
v1 = pathPoints[orig].location - pathPointsO[orig].location
v2 = pathPoints[ins].location - pathPointsO[ins].location
            # the new points are going to be found by translating the points
            # based on a weighting multiplied by these two vectors;
            # the weighting is how far along the muscle the point lies
# find the total muscle length
segments = np.zeros([endPP, 3])
lengths = np.zeros(endPP)
for j in range(endPP):
segments[j] = pathPointsO[muscle.name + '-P' + str(
j + 2)].location - pathPointsO[
muscle.name + '-P' + str(j + 1)].location
lengths[j] = np.linalg.norm(segments[j])
            Tl = np.sum(lengths)
#######################################
# load mnist #
import mnist
import numpy as np
def normalize(img):
fac = 0.99 / 255
return img * fac + 0.01
def digit_to_layer(digit):
return (np.arange(10) == digit).astype(np.float)
train_images = np.array([normalize(img) for img in mnist.train_images()])
train_labels = np.array([digit_to_layer(digit) for digit in mnist.train_labels()])
test_images = np.array([normalize(img) for img in mnist.test_images()])
test_labels = np.array([digit_to_layer(digit) for digit in mnist.test_labels()])
###
import math
from functools import reduce
# padding modes to eventually support: 'valid', 'same', 'full'
# (the conv/pool routines below currently hard-code padding = 0)
# I x I x C
# O x O x K
def init_tuple_counter(count_to: tuple) -> tuple:
    return (0,) * len(count_to)
def adder(counter: tuple, max: tuple) -> tuple:
    if counter == max:
        return counter
    counter_array = np.array(counter)
    length = len(counter_array)
    carry = True
    for i in range(length - 1, -1, -1):
        counter_array[i] = counter_array[i] + 1
        carry = False
        if counter_array[i] >= max[i]:
            counter_array[i] = 0
            carry = True
        if not carry:
            break
    # a carry out of the most significant digit means the counter has
    # wrapped past the final index; return `max` itself as the sentinel
    # that terminates the `while counter != max` loops below
    if carry:
        return tuple(max)
    return tuple(counter_array)
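# --- illustrative sketch (not in the original source) -----------------------
# `adder` advances a multi-dimensional counter the way an odometer does, so
# (assuming the overflow handling above) it visits every index of a shape in
# the same order as np.ndindex before landing on the sentinel.
def _demo_adder():
    shape = (2, 3)
    idx = (0,) * len(shape)
    visited = []
    while idx != shape:
        visited.append(tuple(int(v) for v in idx))
        idx = adder(idx, shape)
    assert visited == list(np.ndindex(*shape))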
def conv2d(input: np.array, output: np.array, filters: np.array, stride: tuple([int, int]) = (1, 1)) \
-> np.array:
## padding needs to be implemented
## proper strides
kernel_y = len(filters)
kernel_x = len(filters[0])
kernel_channels = len(filters[0][0])
num_filters = len(filters[0][0][0])
batch_shape = input.shape[:-3]
layer_shape = input.shape[-3:]
layer_height = layer_shape[0]
layer_width = layer_shape[1]
layer_channel = layer_shape[2]
stride_x = stride[0]
stride_y = stride[1]
padding = 0
## assert padding is valid I x I x K
conv_out_height = int(((layer_height - kernel_y + 2 * padding) / stride_y)) \
+ 1
conv_out_width = int(((layer_width - kernel_x + 2 * padding) / stride_x)) \
+ 1
conv_shape = batch_shape + (conv_out_height, conv_out_width, num_filters)
# conv_out = np.ndarray(shape=conv_shape)
    batch_idx = (0,) * len(batch_shape)
    while batch_idx != batch_shape:
layer = input[tuple(batch_idx)]
for y_idx in range(0, conv_out_height):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, conv_out_width):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
kernel = layer[y_start:y_end, x_start:x_end]
for filter_idx in range(num_filters):
filter = filters[:, :, :, filter_idx]
multi = np.multiply(kernel, filter)
product_idx = (y_idx, x_idx, filter_idx)
output[tuple(batch_idx) + product_idx] = np.sum(multi)
batch_idx = adder(batch_idx, batch_shape)
return output
def conv_output_size(layer_dimensions: tuple, kernel_dimensions: tuple,
                     stride_dimensions: tuple, padding: int):
    return (int(((layer_dimensions[0] - kernel_dimensions[0] + 2 * padding) \
                 / stride_dimensions[0])) + 1,
            int(((layer_dimensions[1] - kernel_dimensions[1] + 2 * padding) \
                 / stride_dimensions[1])) + 1,
            kernel_dimensions[3])
def generate_conv2d_filters(kernel_dimensions: tuple, k: float = 2.0) -> np.array:
kernel_y = kernel_dimensions[0]
kernel_x = kernel_dimensions[1]
kernel_channels = kernel_dimensions[2]
num_filters = kernel_dimensions[3]
filters = np.ndarray(shape=kernel_dimensions)
filter_shape = tuple([kernel_y, kernel_x, kernel_channels])
nl = kernel_x * kernel_y * kernel_channels
std = math.sqrt(k / nl)
for filter_idx in range(num_filters):
filter = np.random.normal(scale=std, size=nl)
filter = filter.reshape(filter_shape)
filters[:, :, :, filter_idx] = filter
return filters
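# --- illustrative sketch (not in the original source) -----------------------
# Wiring the helpers above together: draw He-initialised filters, size the
# output with conv_output_size, and convolve one random image. All shapes
# here are hypothetical values chosen for the demo.
def _demo_conv2d():
    batch, height, width, channels = 1, 8, 8, 1
    kernel_shape = (3, 3, channels, 4)   # (y, x, channels, num_filters)
    stride = (1, 1)
    image = np.random.rand(batch, height, width, channels)
    filters = generate_conv2d_filters(kernel_shape)
    out_shape = conv_output_size((height, width), kernel_shape, stride, 0)
    out = np.zeros((batch,) + out_shape)
    conv2d(image, out, filters, stride)
    assert out.shape == (1, 6, 6, 4)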
def lif_neuron(Vm: float, V_reset: float, V_th: float, tau_m: float, fire=True,
leaky=True) -> np.array:
if Vm >= V_th and fire:
spike = 1
Vm = V_reset
else:
spike = 0
if leaky:
Vm = Vm * math.exp(-1 / tau_m)
return [Vm, spike]
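# --- illustrative sketch (not in the original source) -----------------------
# Drive a single LIF neuron with a constant input current: the membrane
# integrates, decays by exp(-1/tau_m) each step, and resets to V_reset
# whenever it crosses V_th and fires.
def _demo_lif_neuron():
    Vm, spikes = 0.0, []
    for _ in range(20):
        Vm, spike = lif_neuron(Vm + 0.3, V_reset=0.0, V_th=0.75, tau_m=10.0)
        spikes.append(spike)
    assert sum(spikes) > 0  # constant supra-threshold drive must spike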
def flatten(input: np.array, output: np.array, flatten_dim: int):
    batch_dimensions = input.shape[:flatten_dim]
    flattened_dimension = tuple([math.prod(input.shape[flatten_dim:])])
    output = np.reshape(input, batch_dimensions + flattened_dimension)
    return output
def lif_neuron_pool(Vin: np.array,
Vout: np.array,
spike_out: np.array,
Vreset: float = 0,
Vth: float = 0.75,
tau_m: int = 100,
fire: bool = True,
leaky: bool = True,
time_index: int = 0) -> np.array:
# [batch][time][spike_train]
# [batch][ Vin ]
# adequate dimensions to process
# a dimensions to
# assert (len(Vin.shape[-4]) > 2)
#if (Vin != NULL):
# s = 1 # TODO: implement smth here
# generate output arrays
# Vout = np.zero(shape=(Vin.shape))
# spike_out = np.zero(shape=(Vin.shape))
assert(Vin.shape == Vout.shape)
# process batches
batch_dimensions = Vin.shape[:max(time_index-1,0)]
spike_train_length = Vin.shape[time_index]
membrane_dimensions = Vin.shape[time_index+1:]
for batch_idx in np.ndindex(batch_dimensions):
for neuron_idx in np.ndindex(membrane_dimensions):
for t_idx in range(1, spike_train_length):
# membrane voltage for this step
t_current = batch_idx + tuple([t_idx]) + neuron_idx
t_previous = batch_idx + tuple([t_idx - 1]) + neuron_idx
Vm = Vin[t_current] + Vout[t_previous]
# simulate lif-neuron
[Vout[t_current], spike_out[t_current]] = lif_neuron(Vm, Vreset, Vth, tau_m, fire, leaky)
return [Vout, spike_out]
def generate_spike_train(p: float, t: int) -> np.array:
dist = np.random.uniform(1, 0, t)
return np.array([int(item < p) for item in dist])
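# Illustrative sketch (not in the original source): rate coding. A pixel
# intensity p in [0, 1] becomes a Bernoulli spike train whose mean firing
# rate approaches p as the train length grows.
_demo_train = generate_spike_train(p=0.8, t=10000)
assert abs(_demo_train.mean() - 0.8) < 0.05  # holds with high probability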
def generate_layer_spike_train(layer: np.array, train_length: int):
layer_height = len(layer)
layer_width = len(layer[0])
spike_layer = np.ndarray(shape=(train_length, layer_height, layer_width, 1))
for y in range(0, layer_height):
for x in range(0, layer_width):
train = np.array(generate_spike_train(layer[y][x], train_length))
for t in range(0, train_length):
spike_layer[t, y, x, 0] = train[t]
return spike_layer
def avg_pool(input: np.array, output:np.array, kernel_size: tuple([int, int]) = (2, 2), stride: tuple([int, int]) = (1, 1)) -> np.array:
pool = output
## padding needs to be implemented
## proper strides
kernel_y = kernel_size[1]
kernel_x = kernel_size[0]
batch_shape = input.shape[:-3]
layer_shape = input.shape[-3:]
layer_height = layer_shape[0]
layer_width = layer_shape[1]
layer_channel = layer_shape[2]
stride_x = stride[0]
stride_y = stride[1]
padding = 0
pool_height = int(((layer_height - kernel_y + 2 * padding) / stride_y)) + 1
pool_width = int(((layer_width - kernel_x + 2 * padding) / stride_x)) + 1
pool_shape = batch_shape + (pool_height, pool_width, layer_channel)
# pool = np.ndarray(shape=pool_shape)
    batch_idx = (0,) * len(batch_shape)
    while batch_idx != batch_shape:
layer = input[tuple(batch_idx)]
for y_idx in range(0, pool_height):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, pool_width):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
for channel_idx in range(0, layer_channel):
kernel = layer[y_start:y_end, x_start:x_end, channel_idx]
product = np.sum(kernel) / kernel.size
product_idx = (y_idx, x_idx, channel_idx)
pool[tuple(batch_idx) + product_idx] = product
batch_idx = adder(batch_idx, batch_shape)
return pool
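# --- illustrative sketch (not in the original source) -----------------------
# A 2x2 average pool with stride 2 over one 4x4 single-channel image halves
# each spatial dimension; every output cell is the mean of its 2x2 block.
def _demo_avg_pool():
    image = np.arange(16, dtype=float).reshape(1, 4, 4, 1)
    pooled = np.zeros((1, 2, 2, 1))
    avg_pool(image, pooled, kernel_size=(2, 2), stride=(2, 2))
    assert pooled[0, 0, 0, 0] == np.mean([0, 1, 4, 5])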
def generate_dense_layer_weights(input_dimensions: tuple, num_neuron_output: int, k: float = 2.0) -> np.array:
axons_per_neuron = math.prod(input_dimensions)
synapses = np.ndarray(shape=(num_neuron_output, axons_per_neuron))
nl = axons_per_neuron
std = math.sqrt(k / nl)
for i in range(num_neuron_output):
synapses[i] = np.random.normal(scale=std, size=nl)
return synapses
def dense_forward(input_neurons: np.array, output_neurons: np.array, weights: np.array) -> np.array:
ins = input_neurons.shape
ons = output_neurons.shape
ws = weights.shape
# [batch][spike time]
batch_dimensions = input_neurons.shape[:-1]
# [][]
num_input_neurons = weights.shape[1]
num_output_neurons = weights.shape[0]
#[neuron y][neuron x][channel]
for batch_idx in np.ndindex(batch_dimensions):
for output_neuron_idx in range(num_output_neurons):
# action_potential = 0
# dot product
# for input_neuron_idx in range(num_input_neurons):
# ax = input_neurons[batch_idx][input_neuron_idx]
# wx = weights[output_neuron_idx][input_neuron_idx]
# action_potential = action_potential + ax*wx
output_neurons[batch_idx][output_neuron_idx] = np.dot(input_neurons[batch_idx], weights[output_neuron_idx])
return output_neurons
def generate_membrane(membrane_dimensions: tuple, value: float = 0.0):
membrane = np.ndarray(shape=membrane_dimensions)
membrane.fill(value)
return membrane
# This gains the term da_lif / d_net
def differentiate_spike_train(spike_train, Vth = 1):
# sum of decay over time
gamma = sum(spike_train)
if gamma == 0:
return 0
tau_m = len(spike_train)
total_decay = 0
t = tk = 1
for activation in spike_train:
if activation:
if t != tk:
decay = math.exp(-(t - tk) / tau_m)
total_decay = total_decay - (1 / tau_m) * decay
tk = t + 1
t = t + 1
return (1/Vth) * (1 + (1/gamma) * total_decay)
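# Illustrative sketch (not in the original source): the surrogate derivative.
# A silent train contributes zero gradient, while a train that fires at every
# step evaluates to exactly 1/Vth (no decay terms accumulate).
assert differentiate_spike_train(np.zeros(10)) == 0
assert abs(differentiate_spike_train(np.ones(10), Vth=1) - 1.0) < 1e-9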
class Layer:
def __init__(self):
self.trainable = True
self.input_shape = None
self.output_shape = None
def count_parameters(self):
raise NotImplementedError()
def compute_output_shape(self, input_shape):
raise NotImplementedError()
def forward_propagate(self, A):
raise NotImplementedError()
def backward_propagate(self, dZ, cache):
raise NotImplementedError()
def get_weights(self):
raise NotImplementedError()
def set_weights(self, weights):
raise NotImplementedError()
def build(self, input_shape):
self.input_shape = input_shape
class Dropout(Layer):
def __init__(self, probability):
super().__init__()
self.probability = probability
self.mask = None
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = input_shape
self.reset()
def reset(self):
self.mask = np.random.binomial(1, 1-self.probability, size=self.output_shape)
def forward_propagate(self, A):
masked = np.multiply(self.mask, A)
cache = [{"mask" : self.mask,
"output" : masked}]
return masked, cache
    def backward_propagate(self, dZ, cache):
        assert(dZ.shape == self.mask.shape)
        return np.multiply(self.mask, dZ)
def compute_output_shape(self, input_shape):
return input_shape
class AveragePool2D(Layer):
def __init__(self, kernel_size, strides):
super().__init__()
assert (len(kernel_size) == 2)
assert (len(strides) == 2)
self.kernel_size = kernel_size
self.strides = strides
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
# incorrect input dimensions
assert(len(input_shape) >= 3)
# dimensions for sample / instance in the input
sample_shape = input_shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
kernel_x = self.kernel_size[1]
kernel_y = self.kernel_size[0]
stride_x = self.strides[0]
stride_y = self.strides[1]
padding = 0
return (int(((sample_y - kernel_y + 2 * padding) / stride_y)) + 1,
int(((sample_x - kernel_x + 2 * padding) / stride_x)) + 1,
sample_channels)
def forward_propagate(self, A):
# padding needs to be implemented
# separate batches
batch_shape = A.shape[:-3]
# unpack sample shape
sample_shape = A.shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
# unpack kernel
kernel_y = self.kernel_size[0]
kernel_x = self.kernel_size[1]
# unpack stride shape
stride_y = self.strides[0]
stride_x = self.strides[1]
# unpack pooling layer shape
pool_shape = self.compute_output_shape(A.shape)
pool_y = pool_shape[0]
pool_x = pool_shape[1]
# initialize the output convolution
Z_shape = batch_shape + pool_shape
Z = np.zeros(shape=Z_shape)
Z.fill(-9e99)
# begin pooling
for batch_idx in np.ndindex(batch_shape):
layer = A[batch_idx]
for y_idx in range(0, pool_y):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, pool_x):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
for channel_idx in range(0, sample_channels):
kernel = layer[y_start:y_end, x_start:x_end, channel_idx]
product = np.sum(kernel) / kernel.size
product_idx = (y_idx, x_idx, channel_idx)
Z[batch_idx + product_idx] = product
return Z, None
class Convolution2D(Layer):
def __init__(self, number_of_filters, kernel_size, strides):
super().__init__()
self.number_of_filters = number_of_filters
self.kernel_size = kernel_size
self.strides = strides
self.filters = []
self.kernel_shape = []
self.padding = 0
def build(self, input_shape):
k = 2
sample_shape = input_shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
self.input_shape = sample_shape
kernel_y = self.kernel_size[0]
kernel_x = self.kernel_size[1]
kernel_channels = sample_channels
kernel_filters = self.number_of_filters
self.kernel_shape = tuple([kernel_y, kernel_x, kernel_channels, kernel_filters])
self.output_shape = self.compute_output_shape(sample_shape)
self.filters = np.ndarray(shape=self.kernel_shape)
filter_shape = tuple([kernel_y, kernel_x, kernel_channels])
nl = kernel_x * kernel_y * kernel_channels
std = math.sqrt(k / nl)
for filter_idx in range(self.number_of_filters):
filter = np.random.normal(scale=std, size=nl)
filter = filter.reshape(filter_shape)
self.filters[:, :, :, filter_idx] = filter
def compute_output_shape(self, input_shape):
sample_shape = input_shape[-3:]
batch_shape = input_shape[:-3]
input_x = sample_shape[1]
input_y = sample_shape[0]
kernel_x = self.kernel_size[1]
kernel_y = self.kernel_size[0]
stride_x = self.strides[1]
stride_y = self.strides[0]
padding = 0
return (int(((input_y - kernel_y + 2 * padding) / stride_y)) + 1,
int(((input_x - kernel_x + 2 * padding) / stride_x)) + 1,
self.number_of_filters)
def forward_propagate(self, A):
# padding needs to be implemented
# separate batches
batch_shape = A.shape[:-3]
# unpack sample shape
sample_shape = A.shape[-3:]
sample_y = sample_shape[0]
sample_x = sample_shape[1]
sample_channels = sample_shape[2]
assert(sample_shape == self.input_shape)
# unpack kernel
kernel_y = self.kernel_size[0]
kernel_x = self.kernel_size[1]
# unpack stride shape
stride_y = self.strides[0]
stride_x = self.strides[1]
# unpack convolution
conv_shape = self.compute_output_shape(A.shape)
conv_y = conv_shape[0]
conv_x = conv_shape[1]
# initialize the output convolution
output_shape = batch_shape + conv_shape
output = np.zeros(shape= output_shape)
output.fill(-9e99)
# begin convolution
for batch_idx in np.ndindex(batch_shape):
layer = A[batch_idx]
for y_idx in range(0, conv_y):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, conv_x):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
kernel = layer[y_start:y_end, x_start:x_end]
for filter_idx in range(self.number_of_filters):
filter = self.filters[:, :, :, filter_idx]
multi = np.multiply(kernel, filter)
product_idx = (y_idx, x_idx, filter_idx)
output[batch_idx + product_idx] = np.sum(multi)
return output, None
def backward_propagate(self, dZ, cache):
raise NotImplementedError()
def get_weights(self):
return self.filters
def set_weights(self, weights):
self.filters = weights
class Flatten(Layer):
def __init__(self):
super().__init__()
self.input_shape = None
self.output_shape = None
def compute_output_shape(self, sample_shape):
return tuple([math.prod(sample_shape)])
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = self.compute_output_shape(input_shape)
def forward_propagate(self, A):
sample_dimensions = len(self.input_shape)
sample_shape = A.shape[-sample_dimensions:]
flattened_shape = tuple([math.prod(sample_shape)])
batch_shape = A.shape[:-sample_dimensions]
return np.reshape(A, batch_shape + flattened_shape), None
    def backward_propagate(self, dZ, cache):
        batch_shape = dZ.shape[:-1]
        return np.reshape(dZ, batch_shape + self.input_shape)
def get_weights(self):
return []
def set_weights(self, weights):
pass
class Dense(Layer):
def __init__(self, num_outputs):
super().__init__()
self.num_outputs = num_outputs
self.num_inputs = 0
self.weights = np.zeros(0)
self.output_shape = tuple([num_outputs])
def build(self, input_shape, k = 2):
self.input_shape = input_shape
sample_shape = input_shape[-3:]
self.num_inputs = math.prod(sample_shape)
self.weights = np.ndarray(shape=(self.num_outputs, self.num_inputs))
nl = self.num_inputs
std = math.sqrt(k / nl)
for i in range(self.num_outputs):
self.weights[i] = np.random.normal(scale=std, size=nl)
    def set_weights(self, weights):
        assert(len(weights.shape) == 2)
        self.num_inputs = weights.shape[1]
        self.num_outputs = weights.shape[0]
        self.weights = weights
        return self.weights
def get_weights(self):
return self.weights
def compute_output_shape(self, input_shape):
return tuple([self.num_outputs])
def forward_propagate(self, A):
print(A.shape)
print(self.weights.shape)
num_input_dimensions = len(self.input_shape)
sample_shape = A.shape[-num_input_dimensions:]
assert(self.input_shape == sample_shape)
batch_shape = A.shape[:-num_input_dimensions]
output_shape = batch_shape + self.output_shape
Z = np.zeros(shape=output_shape)
Z.fill(-9e99)
for batch_idx in np.ndindex(batch_shape):
Z[batch_idx] = self.weights @ A[batch_idx]
cache = { 'A' : A }
return Z, cache
    def backward_propagate(self, dZ, cache):
        A_prev = cache['A']
        batch_shape = dZ.shape[:-1]
        dW = np.ndarray(shape=batch_shape + self.weights.shape)
        db = None
        dA = np.ndarray(shape=A_prev.shape)
        for batch_idx in np.ndindex(batch_shape):
            # per-sample gradient w.r.t. the weights: outer(dZ, A_prev)
            dW[batch_idx] = np.outer(dZ[batch_idx], A_prev[batch_idx])
            # gradient w.r.t. the layer input: W^T . dZ
            dA[batch_idx] = np.dot(self.weights.T, dZ[batch_idx])
        assert (dA.shape == A_prev.shape)
        assert (dW.shape == batch_shape + self.weights.shape)
        return dA, dW, db
class Membrane:
def __init__(self):
pass
def reset(self):
pass
def activate(self, Z):
return 0
def differentiate(self, dA, cache):
return 0
class LeakyIntegrateAndFire(Membrane):
def __init__(self, Vreset: float, Vth: float, tau_m: float, fire=True, leaky=True):
super().__init__()
self.Vreset = Vreset
self.Vth = Vth
self.tau_m = tau_m
self.fire = fire
self.leaky = leaky
self.input_shape = None
self.__num_input_dimensions = None
self.output_shape = None
def build(self, input_shape):
self.input_shape = input_shape
self.__num_input_dimensions = len(self.input_shape)
self.output_shape = input_shape
def neuron_activation(self, Vm):
spike = None
if Vm >= self.Vth and self.fire:
spike = 1
self.Vm = self.Vreset
else:
spike = 0
if self.leaky:
Vm = Vm * math.exp(-1 / self.tau_m)
# TODO: 1 / t needs to be implemented
return [Vm, spike]
def activate(self, Vin):
# this function can be optimised given that only the final Vm is required
# assert (Vin.shape == Vout.shape)
batch_shape = Vin.shape[:-self.__num_input_dimensions-1]
spike_train_length = Vin.shape[-self.__num_input_dimensions-1]
membrane_shape = Vin.shape[-self.__num_input_dimensions:]
        activation_shape = batch_shape + tuple([spike_train_length]) + membrane_shape
Vout = np.ndarray(shape=(activation_shape))
Vout.fill(self.Vreset)
Vp = np.ndarray(shape=(batch_shape + membrane_shape))
Vp.fill(-9e99)
spike_train = np.ndarray(shape=(activation_shape))
spike_train.fill(0)
t_current = None
t_previous = None
for batch_idx in np.ndindex(batch_shape):
for neuron_idx in np.ndindex(membrane_shape):
for t_idx in range(1, spike_train_length):
# membrane voltage for this step
t_current = batch_idx + tuple([t_idx]) + neuron_idx
t_previous = batch_idx + tuple([t_idx - 1]) + neuron_idx
Vm = Vin[t_current] + Vout[t_previous]
# simulate lif-neuron
[Vout[t_current], spike_train[t_current]] = self.neuron_activation(Vm)
# store the final membrane voltage
Vp_idx = batch_idx + neuron_idx
Vp[Vp_idx] = Vout[t_current]
        cache = {
            'Vp' : Vp,
            'Vout' : Vout,
            'spike_train' : spike_train,
            'tau_m' : self.tau_m
        }
return spike_train, cache
@staticmethod
    def __compute_spike_train_decay(spike_train):
        # sum of decay over time
        total_decay = 0
        gamma = sum(spike_train)
        if gamma == 0:
            return [total_decay, gamma]
        tau_m = len(spike_train)
        t = tk = 1
        for activation in spike_train:
            if activation:
                if t != tk:
                    decay = math.exp(-(t - tk) / tau_m)
                    total_decay = total_decay - (1 / tau_m) * decay
                tk = t + 1
            t = t + 1
        return [total_decay, gamma]
def __diff_LIF(self, dA, cache):
Vp = cache['Vp']
spike_trains = cache['spike_train']
tau_m = cache['tau_m']
batch_shape = Vp.shape[:-self.__num_input_dimensions]
membrane_shape = Vp.shape[-self.__num_input_dimensions:]
dZ_shape = batch_shape + membrane_shape
dZ = np.ndarray(shape=dZ_shape)
dZ.fill(-9e99)
        for batch_idx in np.ndindex(batch_shape):
            for neuron_idx in np.ndindex(membrane_shape):
                idx = batch_idx + neuron_idx
                # pull the full spike train for this neuron across time
                spike_train = spike_trains[batch_idx + (slice(None),) + neuron_idx]
                [total_decay, gamma] = LeakyIntegrateAndFire.__compute_spike_train_decay(spike_train)
                if gamma == 0:
                    # a silent neuron contributes no surrogate gradient
                    dZ[idx] = 0.0
                else:
                    dZ[idx] = dA[idx] * (1 / self.Vth) * (1 + (1 / gamma) * total_decay)
return dZ
def __diff_LI(self, dA, cache):
Vp = cache['Vp']
spike_trains = cache['spike_train']
tau_m = cache['tau_m']
batch_shape = Vp.shape[:-self.__num_input_dimensions]
membrane_shape = Vp.shape[-self.__num_input_dimensions:]
dZ_shape = batch_shape + membrane_shape
dZ = np.ndarray(shape=dZ_shape, dtype=np.float64)
dZ.fill(-9e99)
for batch_idx in np.ndindex(batch_shape):
for neuron_idx in np.ndindex(membrane_shape):
idx = batch_idx + neuron_idx
dZ[idx] = (1/tau_m) * (Vp[idx]) * dA[idx]
return dZ
def __diff_IF(self, dA, cache):
return None
def __diff_I(self, dA, cache):
return None
def differentiate(self, dA, cache):
if self.leaky:
if self.fire:
return self.__diff_LIF(dA, cache)
else:
return self.__diff_LI(dA, cache)
else:
if self.fire:
return self.__diff_IF(dA, cache)
else:
return self.__diff_I(dA, cache)
def get_output_shape(self):
return self.output_shape
# the idea of this model is to process everything LIF
class SpikingNeuralNetwork:
@staticmethod
def __traverse_batch(start, end, step):
i = start
while i < end:
yield i
i += step
yield end
def __init__(self):
self.__layers = []
self.__membrane = []
pass
def build(self, input_shape):
# set input shape for model
self.input_shape = input_shape
self.tau_m = input_shape[0]
for layer_idx in range(0, len(self.__layers)):
layer = self.__layers[layer_idx]
membrane = self.__membrane[layer_idx]
print(str(layer_idx) + ":" + str(layer))
layer.build(input_shape=input_shape)
input_shape = layer.compute_output_shape(input_shape)
if membrane is not None:
membrane.build(input_shape)
# last layers output shape to models output shape
self.output_shape = input_shape
def add_layer(self, layer: Layer, activation: Membrane = None):
self.__layers.append(layer)
self.__membrane.append(activation)
def forward_propagation(self, X):
caches = []
A = X
for layer_idx in range(0, len(self.__layers)):
layer = self.__layers[layer_idx]
membrane = self.__membrane[layer_idx]
print(layer)
Z, linear_cache = layer.forward_propagate(A)
print("Z: " + str(np.amax(Z)))
if membrane is not None:
print(membrane)
A, activation_cache = membrane.activate(Z)
print("A: " + str(np.amax(A)))
cache = { 'linear_cache' : linear_cache,
'activation_cache' : activation_cache }
caches.append({ 'A': A,
'Z': Z,
'cache': cache})
else:
print("Z: " + str(np.amax(Z)))
A = Z
cache = { 'linear_cache' : linear_cache,
'activation_cache' : None }
caches.append({ 'A': None,
'Z': Z,
'cache': cache})
return A, caches
def compute_cost(self, A, Y):
return 0.5 * np.sum(np.power((A - Y), 2))
def compute_loss(self, A, Y):
# np.mean(np.square(Y - A), axis=-2) <- MSE loss
return Y - A
def backward_propagation(self, AL, caches, Y):
grads = []
L = len(self.__layers)
m = AL.shape[1] ## figure this out
# gradients
dZ, dW, db = (None, None, None)
# derivative of activation in final layer
dAL = self.compute_loss(AL, Y)
grad = [
{
"dZ": None,
"dA": dAL,
"dW": None,
"db": None
}
]
grads.insert(0, grad)
# backwards propagating the loss
for layer_idx in range(L-1, 0, -1):
layer = self.__layers[layer_idx]
A, Z, cache = (caches[layer_idx]['A'], caches[layer_idx]['Z'], caches[layer_idx]['cache'])
linear_cache, activation_cache = (cache['linear_cache'], cache['activation_cache'])
membrane = self.__membrane[layer_idx]
if membrane is not None:
dZ = membrane.differentiate(dAL, activation_cache)
dAL, dW, db = layer.backward_propagate(dZ, linear_cache)
else:
dAL, dW, db = layer.backward_propagate(dAL, linear_cache)
grad = [
{
"dZ":dZ,
"dA":dAL,
"dW":dW,
"db":db
}
]
grads.insert(0, grad)
return grads
def fit(self, X=None, Y=None, epochs=1, batch_size=None, learning_rate=0.002):
# batch_size + (time, height, width, channel)
num_input_dimensions = len(self.input_shape)
num_output_dimensions = len(self.output_shape)
batch_shape = X.shape[:-num_input_dimensions]
batch_ndim = len(batch_shape)
num_samples = math.prod(batch_shape)
sample_shape = X.shape[-num_input_dimensions:]
        sample_label_shape = Y.shape[batch_ndim:]
assert(sample_label_shape == self.output_shape)
batch_samples = np.zeros(shape=tuple([batch_size]) + sample_shape)
batch_samples_labels = np.zeros(shape=tuple([batch_size]) + sample_label_shape)
# output from the opperation
output = np.zeros(shape=batch_shape+Y.shape)
# run the training data an epochs number of times
for epoch in range(epochs):
# start processing and updating the network according to the batch size
for train_start in SpikingNeuralNetwork.__traverse_batch(0, num_samples-batch_size, batch_size):
# get the end index
train_end = min(train_start+batch_size, num_samples)
# prevent over indexing at the end of the array
number_of_training_samples = train_end - train_start
# can this be optimized
batch_indices = []
for train in range(number_of_training_samples):
batch_idx = np.unravel_index(train_start + train, batch_shape)
print(batch_idx)
batch_samples[batch_idx] = X[batch_idx]
batch_samples_labels[batch_idx] = Y[batch_idx]
batch_outputs, batch_cache = self.forward_propagation(batch_samples)
final_cache = batch_cache[len(batch_cache)-1]
cache = final_cache['cache']
activation_cache = cache['activation_cache']
Vp = activation_cache['Vp']
costs = self.compute_cost(Vp, batch_samples_labels)
loss = self.compute_loss(Vp, batch_samples_labels)
# this needs to be fixed
AL = Vp
grads = self.backward_propagation(AL, batch_cache, batch_samples_labels)
parameters = self.update_parameters(batch_cache, grads, learning_rate)
# select batch images
batch_labels = np.take(Y, batch_indices)
# batch_spike_train = Model.__generate_model_spike_train(batch_size, train_labels[0])
# generate batch activation outputs
# batch_outputs = self.__generate_batch_outputs(batch_size, train_labels[0])
# generate batch cache
# batch_cache = self.__generate_batch_cache(batch_size)
# generate batch gradients
# batch_gradients = self.__generate_batch_gradients(batch_size)
# costs
# costs = np.zeros(batch_size)
# run batch # potentially multi threaded
# for i in range(0, batch_size):
# select sample from batch
#train_image = batch_images[i]
# train_label = batch_labels[i]
# convert to input to spike train
# layer_spike_train = generate_layer_spike_train(train_image, self.spike_train_length)
# propagate through network
# batch_outputs[i], batch_cache[i] = self.forward_propagation(train_image)
# dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
# dA_prev_temp, dW_temp, db_temp
# calculate the cost
# costs[i] = self.compute_cost(Y, train_label)
# backwards propagate to calculate the gradients in the network
# grads = Model.backward_propagation(model, batch_outputs[i], Y, batch_cache[i])
# batch_end_idx = batch_start_idx
# update the network using the gradients from the batch
# parameters = Model.update_parameters(model, batch_cache, batch_gradients, learning_rate)
def predict(self, X):
pass
def test_case_dropout():
probability = 0.2
layer = Dropout(probability)
dim = 10000
test_shape = (dim,dim)
layer.build(input_shape=test_shape)
test_input = np.ones(test_shape)
expected = math.prod(test_shape) * (1 - probability)
print("expected: " + str(expected))
received_output, cache = layer.forward_propagate(test_input)
sum_received_output = np.sum(received_output)
print("actual: " + str(sum_received_output))
test_case_dropout()
tau_m = 10
dropout_rate = 0.2
LeNet5 = SpikingNeuralNetwork()
LeNet5.add_layer(Convolution2D(kernel_size=(5,5), strides=(1,1), number_of_filters=20), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(AveragePool2D(kernel_size=(2,2),strides=(2,2)), LeakyIntegrateAndFire(0, 0.75, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(Convolution2D(kernel_size=(5,5), strides=(1,1), number_of_filters=50), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(AveragePool2D(kernel_size=(2,2),strides=(2,2)), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(Flatten())
LeNet5.add_layer(Dense(num_outputs=200), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dense(num_outputs=10), LeakyIntegrateAndFire(0, 1, tau_m, fire=False, leaky=True))
input_shape = train_images[0].shape
input_images = np.array([generate_layer_spike_train(train_images[0], tau_m), generate_layer_spike_train(train_images[1], tau_m),
generate_layer_spike_train(train_images[2], tau_m), generate_layer_spike_train(train_images[3], tau_m),
generate_layer_spike_train(train_images[4], tau_m)])
LeNet5.build(input_images[0].shape)
lbl = train_labels[0:5,:]
LeNet5.fit(input_images, train_labels[0:5,:], batch_size=2,learning_rate=0.002)
LeNet5.predict(test_images)
class Model:
@staticmethod
def __generate_batch_outputs(batch_size, train_label):
return np.zeros(shape=(tuple([batch_size]) + train_label.shape))
@staticmethod
def __generate_batch_cache(model, batch_size):
cache = []
time = tuple([model.spike_train_length])
for batch_idx in range(0, batch_size):
cache.extend([{
"l1_activation" : np.zeros(shape= time + model.__l1_output_dimensions),
"l1_membrane": np.zeros(shape=time + model.__l1_output_dimensions),
"l1_spike": np.zeros(shape=time + model.__l1_output_dimensions),
"l2_activation": np.zeros(shape=time + model.__l2_output_dimensions),
"l2_membrane": np.zeros(shape=time + model.__l2_output_dimensions),
"l2_spike": np.zeros(shape=time + model.__l2_output_dimensions),
"l3_activation": np.zeros(shape=time + model.__l3_output_dimensions),
"l3_membrane": np.zeros(shape=time + model.__l3_output_dimensions),
"l3_spike": np.zeros(shape=time + model.__l3_output_dimensions),
"l4_activation": np.zeros(shape=time + model.__l4_output_dimensions),
"l4_membrane": np.zeros(shape=time + model.__l4_output_dimensions),
"l4_spike": np.zeros(shape=time + model.__l4_output_dimensions),
"l5_output": np.zeros(shape=time + model.__l5_output_dimensions),
"l6_activation": np.zeros(shape=time + model.__l6_output_dimensions),
"l6_membrane": np.zeros(shape=time + model.__l6_output_dimensions),
"l6_spike": np.zeros(shape=time + model.__l6_output_dimensions),
"l7_activation": np.zeros(shape=time + model.__l7_output_dimensions),
"l7_membrane": np.zeros(shape=time + model.__l7_output_dimensions),
"l7_spike": np.zeros(shape=time + model.__l7_output_dimensions),
}])
return cache
@staticmethod
def __generate_batch_gradients(model, batch_size):
gradients = []
for i in range(batch_size):
gradients.extend([{
"l1_gradients": np.zeros(shape=model.__l1_filters.shape),
"l3_gradients": np.zeros(shape=model.__l1_filters.shape),
"l6_gradients": | np.zeros(shape=model.__l6_weights.shape) | numpy.zeros |
# -*- coding: utf-8 -*-
import sys
import struct
import os, os.path
from unittest import TestCase
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import blaze.carray as ca
from blaze.carray import chunk
from blaze.carray.tests import common
from common import MayBeDiskTest
is_64bit = (struct.calcsize("P") == 8)
# Just memory tests for now
class chunkTest(TestCase):
def test01(self):
"""Testing `__getitem()__` method with scalars"""
a = np.arange(1e3)
b = chunk(a, atom=a.dtype, cparams=ca.cparams())
#print "b[1]->", `b[1]`
self.assert_(a[1] == b[1], "Values in key 1 are not equal")
def test02(self):
"""Testing `__getitem()__` method with ranges"""
a = np.arange(1e3)
b = chunk(a, atom=a.dtype, cparams=ca.cparams())
#print "b[1:3]->", `b[1:3]`
assert_array_equal(a[1:3], b[1:3], "Arrays are not equal")
def test03(self):
"""Testing `__getitem()__` method with ranges and steps"""
a = np.arange(1e3)
b = chunk(a, atom=a.dtype, cparams=ca.cparams())
#print "b[1:8:3]->", `b[1:8:3]`
assert_array_equal(a[1:8:3], b[1:8:3], "Arrays are not equal")
def test04(self):
"""Testing `__getitem()__` method with long ranges"""
a = np.arange(1e4)
b = chunk(a, atom=a.dtype, cparams=ca.cparams())
#print "b[1:8000]->", `b[1:8000]`
assert_array_equal(a[1:8000], b[1:8000], "Arrays are not equal")
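# Illustrative sketch (not part of the original suite): the invariant the
# chunk tests above exercise, written as a stand-alone helper.
def _example_chunk_roundtrip():
    a = np.arange(1e3)
    b = chunk(a, atom=a.dtype, cparams=ca.cparams())
    assert_array_equal(a[1:8:3], b[1:8:3], "chunk slicing should mirror numpy")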
class getitemTest(MayBeDiskTest, TestCase):
def test01a(self):
"""Testing `__getitem()__` method with only a start"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(1)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test01b(self):
"""Testing `__getitem()__` method with only a (negative) start"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(-1)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test01c(self):
"""Testing `__getitem()__` method with only a (start,)"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
#print "b[(1,)]->", `b[(1,)]`
self.assert_(a[(1,)] == b[(1,)], "Values with key (1,) are not equal")
def test01d(self):
"""Testing `__getitem()__` method with only a (large) start"""
a = np.arange(1e4)
b = ca.carray(a, rootdir=self.rootdir)
sl = -2 # second last element
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test02a(self):
"""Testing `__getitem()__` method with ranges"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(1, 3)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test02b(self):
"""Testing `__getitem()__` method with ranges (negative start)"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(-3)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test02c(self):
"""Testing `__getitem()__` method with ranges (negative stop)"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(1, -3)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test02d(self):
"""Testing `__getitem()__` method with ranges (negative start, stop)"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(-3, -1)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test02e(self):
"""Testing `__getitem()__` method with start > stop"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(4, 3, 30)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test03a(self):
"""Testing `__getitem()__` method with ranges and steps (I)"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(1, 80, 3)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test03b(self):
"""Testing `__getitem()__` method with ranges and steps (II)"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(1, 80, 30)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test03c(self):
"""Testing `__getitem()__` method with ranges and steps (III)"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(990, 998, 2)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test03d(self):
"""Testing `__getitem()__` method with ranges and steps (IV)"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(4, 80, 3000)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test04a(self):
"""Testing `__getitem()__` method with long ranges"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=100, rootdir=self.rootdir)
sl = slice(1, 8000)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test04b(self):
"""Testing `__getitem()__` method with no start"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=100, rootdir=self.rootdir)
sl = slice(None, 8000)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test04c(self):
"""Testing `__getitem()__` method with no stop"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=100, rootdir=self.rootdir)
sl = slice(8000, None)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test04d(self):
"""Testing `__getitem()__` method with no start and no stop"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=100, rootdir=self.rootdir)
sl = slice(None, None, 2)
#print "b[sl]->", `b[sl]`
assert_array_equal(a[sl], b[sl], "Arrays are not equal")
def test05(self):
"""Testing `__getitem()__` method with negative steps"""
a = np.arange(1e3)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
sl = slice(None, None, -3)
#print "b[sl]->", `b[sl]`
self.assertRaises(NotImplementedError, b.__getitem__, sl)
class getitemDiskTest(getitemTest):
disk = True
class setitemTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing `__setitem()__` method with only one element"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
b[1] = 10.
a[1] = 10.
#print "b->", `b`
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test00b(self):
"""Testing `__setitem()__` method with only one element (tuple)"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
b[(1,)] = 10.
a[(1,)] = 10.
#print "b->", `b`
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test01(self):
"""Testing `__setitem()__` method with a range"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
b[10:100] = np.arange(1e2 - 10.)
a[10:100] = np.arange(1e2 - 10.)
#print "b->", `b`
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test02(self):
"""Testing `__setitem()__` method with broadcasting"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
b[10:100] = 10.
a[10:100] = 10.
#print "b->", `b`
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test03(self):
"""Testing `__setitem()__` method with the complete range"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=10, rootdir=self.rootdir)
b[:] = np.arange(10., 1e2 + 10.)
a[:] = np.arange(10., 1e2 + 10.)
#print "b->", `b`
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test04a(self):
"""Testing `__setitem()__` method with start:stop:step"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=1, rootdir=self.rootdir)
sl = slice(10, 100, 3)
b[sl] = 10.
a[sl] = 10.
#print "b[%s] -> %r" % (sl, b)
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test04b(self):
"""Testing `__setitem()__` method with start:stop:step (II)"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=1, rootdir=self.rootdir)
sl = slice(10, 11, 3)
b[sl] = 10.
a[sl] = 10.
#print "b[%s] -> %r" % (sl, b)
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test04c(self):
"""Testing `__setitem()__` method with start:stop:step (III)"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=1, rootdir=self.rootdir)
sl = slice(96, 100, 3)
b[sl] = 10.
a[sl] = 10.
#print "b[%s] -> %r" % (sl, b)
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test04d(self):
"""Testing `__setitem()__` method with start:stop:step (IV)"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=1, rootdir=self.rootdir)
sl = slice(2, 99, 30)
b[sl] = 10.
a[sl] = 10.
#print "b[%s] -> %r" % (sl, b)
assert_array_equal(a, b[:], "__setitem__ not working correctly")
def test05(self):
"""Testing `__setitem()__` method with negative step"""
a = np.arange(1e2)
b = ca.carray(a, chunklen=1, rootdir=self.rootdir)
sl = slice(2, 99, -30)
self.assertRaises(NotImplementedError, b.__setitem__, sl, 3.)
class setitemDiskTest(setitemTest):
disk = True
class appendTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing `append()` method"""
a = np.arange(1000)
b = ca.carray(a, rootdir=self.rootdir)
b.append(a)
#print "b->", `b`
        c = np.concatenate((a, a))
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
def vshale_gr(gr_curve,gr_sand,gr_shale,type='linear'):
"""vshale_gr [summary]
Parameters
----------
gr_curve : [type]
[description]
gr_sand : [type]
[description]
gr_shale : [type]
[description]
type : str, optional
[description], by default 'linear'
Returns
-------
[type]
[description]
Raises
------
ValueError
[description]
"""
gr_curve=np.atleast_1d(gr_curve)
gr_sand=np.atleast_1d(gr_sand)
gr_shale=np.atleast_1d(gr_shale)
igr=(gr_curve-gr_sand)/(gr_shale-gr_sand)
igr[igr < 0.0] = 0.0
igr[igr > 1.0] = 1.0
#https://www.geoloil.com/VshModels.php
if type == 'linear':
vsh = igr
elif type == 'clavier':
vsh = 1.7 - np.sqrt(3.38 - np.power(igr+0.7,2))
elif type == 'stieber':
vsh = igr/(3-2*igr)
elif type == 'larionov_old':
vsh = 0.33 * (np.power(2,2*igr)-1)
elif type == 'larionov_tertiary':
vsh = 0.083 * (np.power(2,3.7*igr)-1)
else:
raise ValueError(f'method especified [ {type} ] does not exist')
return vsh
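# A minimal doctest-style sketch of the call (the log values below are
# hypothetical, not from any real well):
#     >>> gr = np.array([30., 60., 90., 120.])
#     >>> vshale_gr(gr, gr_sand=20., gr_shale=120., type='linear')
#     array([0.1, 0.4, 0.7, 1. ])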
def vshale_dn(rho_curve, ntr_curve, rho_ma=2.65, rho_f=1.0, hi_shl=0.46,rho_shl=2.43):
"""vshale_dn [summary]
Parameters
----------
rho_curve : [type]
[description]
ntr_curve : [type]
[description]
rho_ma : float, optional
[description], by default 2.65
rho_f : float, optional
[description], by default 1.0
hi_shl : float, optional
[description], by default 0.46
rho_shl : float, optional
[description], by default 2.43
Returns
-------
[type]
[description]
"""
rho_curve= np.atleast_1d(rho_curve)
ntr_curve= np.atleast_1d(ntr_curve)
rho_ma = np.atleast_1d(rho_ma)
rho_f = np.atleast_1d(rho_f)
hi_shl = np.atleast_1d(hi_shl)
rho_shl = np.atleast_1d(rho_shl)
vsh = (rho_curve - rho_ma + ntr_curve*(rho_ma-rho_f))/(rho_shl - rho_ma + hi_shl*(rho_ma-rho_f))
vsh[vsh < 0.0] = 0.0
vsh[vsh > 1.0] = 1.0
return vsh
def phi_rho(rho_curve,rho_ma=2.65,rho_f=1.0):
"""phi_rho [summary]
Parameters
----------
rho_curve : [type]
[description]
rho_ma : float, optional
[description], by default 2.65
rho_f : float, optional
[description], by default 1.0
Returns
-------
[type]
[description]
"""
rho_curve=np.atleast_1d(rho_curve)
rho_ma=np.atleast_1d(rho_ma)
rho_f=np.atleast_1d(rho_f)
phi_rho_curve=(rho_ma-rho_curve)/(rho_ma-rho_f)
phi_rho_curve[phi_rho_curve < 0.0] = 0.0
phi_rho_curve[phi_rho_curve > 1.0] = 1.0
return phi_rho_curve
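# Sketch (hypothetical bulk densities in g/cc): a 2.2 g/cc reading with the
# default sandstone matrix gives roughly 27% porosity:
#     >>> phi_rho(np.array([2.65, 2.40, 2.20]))
#     array([0.        , 0.15151515, 0.27272727])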
def phie(phi_curve,vsh_curve):
"""phie [summary]
Parameters
----------
phi_curve : [type]
[description]
vsh_curve : [type]
[description]
Returns
-------
[type]
[description]
"""
phi_curve=np.atleast_1d(phi_curve)
vsh_curve=np.atleast_1d(vsh_curve)
phie_curve=phi_curve*(1 -vsh_curve)
phie_curve[phie_curve < 0.0] = 0.0
phie_curve[phie_curve > 0.3] = 0.3
return phie_curve
def phia(phi_rho_curve, ntr_curve, method='geometric'):
"""phia [summary]
Parameters
----------
phi_rho_curve : [type]
[description]
ntr_curve : [type]
[description]
method : str, optional
[description], by default 'geometric'
Returns
-------
[type]
[description]
"""
phi_rho_curve = np.atleast_1d(phi_rho_curve)
ntr_curve = np.atleast_1d(ntr_curve)
c = np.transpose(np.vstack((phi_rho_curve,ntr_curve)))
if method == 'mean':
phia_curve = np.mean(c,axis=1)
elif method== 'geometric':
phia_curve = np.power(((np.power(phi_rho_curve,2)+np.power(ntr_curve,2))/2),0.5)
return phia_curve
def facies_dnp(rho_curve, ntr_curve,pef_curve,**kw):
"""facies_dnp [summary]
Parameters
----------
rho_curve : [type]
[description]
ntr_curve : [type]
[description]
pef_curve : [type]
[description]
Returns
-------
[type]
[description]
"""
rho_curve = np.atleast_1d(rho_curve)
ntr_curve = np.atleast_1d(ntr_curve)
pef_curve = np.atleast_1d(pef_curve)
phi_rho_curve = phi_rho(rho_curve,**kw)
phia_curve = phia(phi_rho_curve,ntr_curve)
u = pef_curve*((rho_curve + 0.1833)/1.07)
uma = (u - 0.398 * phia_curve)/(1-phia_curve)
dga = (rho_curve - phia_curve)/(1-phia_curve)
return uma, dga
def sw(rt_curve,phi_curve,rw,vsh_curve=None,a=0.62,m=2.15,n=2,rsh=4.0,alpha=0.3,method="archie"):
"""sw [summary]
Parameters
----------
rt_curve : [type]
[description]
phi_curve : [type]
[description]
rw : [type]
[description]
vsh_curve : [type], optional
[description], by default None
a : float, optional
[description], by default 0.62
m : float, optional
[description], by default 2.15
n : int, optional
[description], by default 2
rsh : float, optional
[description], by default 4.0
alpha : float, optional
[description], by default 0.3
method : str, optional
[description], by default "archie"
Returns
-------
[type]
[description]
"""
a=np.atleast_1d(a)
m=np.atleast_1d(m)
n=np.atleast_1d(n)
vsh = np.atleast_1d(vsh_curve) if vsh_curve is not None else None
rsh=np.atleast_1d(rsh)
alpha=np.atleast_1d(alpha)
rt=np.atleast_1d(rt_curve)
phi = np.atleast_1d(phi_curve)
rw=np.atleast_1d(rw)
if method == "archie":
sw_curve=np.power(((a*rw)/(rt*np.power(phi,m))),1/n)
elif method == "smdx": #https://www.spec2000.net/14-sws.htm
C=((1-vsh)*a*rw)/np.power(phi,m)
D=C*vsh/(2 * rsh)
E=C/rt
sw_curve=np.power(np.sqrt(D**2 + E) - D, 2/n)
elif method == "indo":
#https://geoloil.com/Indonesia_SW.php
#A=np.sqrt(1 /rt)
#B=(np.power(vsh,(1 -(vsh/2)))/np.sqrt(rsh))
#C=np.sqrt(np.power(phi,m)/(a*rw))
#sw_curve=np.power((A/(B+C)),2/n)
#http://nafta.wiki/display/GLOSSARY/Indonesia+Model+%28Poupon-Leveaux%29+@model
A_inv = 1 + np.sqrt((np.power(vsh,2-vsh)*rw)/(phi*rsh))
A = 1/A_inv
sw_curve = np.power((A*rw)/(np.power(phi,m)*rt),1/n)
elif method == "fertl":
A=np.power(phi,-m/2)
B=(a*rw)/rt
C=((alpha*vsh)/2)**2
sw_curve=A*((np.sqrt(B+C))-np.sqrt(C))
sw_curve[sw_curve < 0.0] = 0.0
sw_curve[sw_curve > 1.0] = 1.0
return sw_curve
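# Sketch of an Archie evaluation (all values hypothetical): rt=20 ohm-m,
# phi=0.2 and rw=0.05 ohm-m give a water saturation of roughly 0.22:
#     >>> sw(rt_curve=20., phi_curve=0.2, rw=0.05, method='archie')
#     array([0.222...])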
def depth_temperature(depth, surface_temperature=77 ,gradient=1):
"""depth_temperature [summary]
Parameters
----------
depth : [type]
[description]
surface_temperature : int, optional
[description], by default 77
gradient : int, optional
[description], by default 1
Returns
-------
[type]
[description]
"""
depth = np.atleast_1d(depth)
t = (gradient/100) * depth + surface_temperature
return t
def rw_temp_convert(rw,t1,t2, temp_unit='f'):
"""rw_temp_convert [summary]
Parameters
----------
rw : [type]
[description]
t1 : [type]
[description]
t2 : [type]
[description]
temp_unit : str, optional
[description], by default 'f'
Returns
-------
[type]
[description]
"""
rw = np.atleast_1d(rw)
t1 = np.atleast_1d(t1)
t2 = np.atleast_1d(t2)
if temp_unit=='f':
c = np.array([6.77])
else:
c = np.array([21.5])
rw2 = rw*((t1 + c)/(t2 + c))
return rw2
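# Sketch (hypothetical values): Rw of 0.10 ohm-m measured at 75 degF,
# corrected to 150 degF with the Arps relation:
#     >>> rw_temp_convert(0.10, 75., 150.)
#     array([0.0521...])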
def rw(temp, salinity,temp_unit='f'):
"""rw [summary]
Parameters
----------
temp : [type]
[description]
salinity : [type]
[description]
temp_unit : str, optional
[description], by default 'f'
Returns
-------
[type]
[description]
"""
    # 1) Convert from Celsius to Fahrenheit
if temp_unit=='c':
tf = 1.8*temp + 32.0
else:
tf=temp
# 2) Calculate Resistivity in Ohm meters
    rw = np.power((400000.0/(tf*salinity)),0.88)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 12:05:57 2017
@author: manishdevana
Further experiments with ray tracing after doing wave property calculations
(runs calculations in internal_wave_properties)
"""
import numpy as np
import matplotlib.pyplot as plt
import data_load
import gsw
import oceans as oc
import Internal_wave_properties_REV as iwp
import pandas as pd
# Testdata load
def internal_wave_properties(save_only=True):
ladcp, ctd, bathy = data_load.load_data()
rho_neutral = np.genfromtxt('neutral_rho.csv', delimiter=',')
strain = np.genfromtxt('strain.csv', delimiter=',')
    # NOTE: these two values are currently unused; the call below hard-codes wl_max=500
    wl_max = 350
    wl_min = 100
lambdaH, kh, omega, N2, dist, depths,\
U2, V2, p_ladcp, Uspec, Vspec,\
etaSpec, aspect, Ek, Ep, Etotal = iwp.frequencyEstimator(ctd, ladcp, bathy,\
rho_neutral,strain, wl_max=500, full_set=True)
if not save_only:
return lambdaH, kh, omega, N2, dist, depths,\
U2, V2, p_ladcp, Uspec, Vspec, etaSpec, aspect
def doppler_shifts(kh, ladcp, avg=1000, bin_size = 512):
"""
Doppler shift the internal frequency to test for lee waves
    using the depth-averaged flow
"""
U, V, p_ladcp = oc.loadLADCP(ladcp)
maxDepth = 4000
idx_ladcp = p_ladcp[:,-1] <= maxDepth
dz = int(np.nanmean(np.gradient(p_ladcp, axis=0)))
window = int(np.ceil(avg/dz))
Ubar = []
for u, v in zip(U.T, V.T):
mask = np.isfinite(u)
u = u[mask]
v = v[mask]
u = np.nanmean(u[-window:])
v = np.nanmean(v[-window:])
Ubar.append(np.sqrt(u**2 + v**2))
Ubar = np.vstack(Ubar)
dshift = []
for cast, ubar in zip(kh.T, Ubar):
dshift.append(cast*ubar)
dshift = np.vstack(dshift).T
return dshift
def horizontal_wave_vector_decomposition(Uprime, Vprime, axis=1, nfft=1024):
"""
Attempt to decompose horizontal wave vector
Following methods used in Polzin 2007 (internal waves in eddies or something like that)
"""
# U'* x b'
    Uspec = np.fft.fft(Uprime, n=nfft, axis=axis)
    Vspec = np.fft.fft(Vprime, n=nfft, axis=axis)
theta = []
for Uin, Vin in zip(Uspec, Vspec):
u_conj = np.conj(Uin[:,1])
v_prime = Vin[:,1]
u_prime = Uin[:,1]
v_conj = np.conj(Vin[:,1])
theta.append(np.arctan(2*np.real((u_conj*v_prime)/(u_conj*u_prime - v_conj*v_prime)))/2)
theta = np.vstack(theta).T
    # NOTE: `kh` is not defined in this function; it is assumed to exist at module
    # scope and should probably be passed in as an argument.
    k = -kh*np.cos(theta)
k_wave = (2*np.pi/k)*1e-3
l = kh*np.sin(theta)
l_wave = (2*np.pi/l)*1e-3
return k, l
def lee_wave_tests(kh, omega, N2, ctd, ladcp, dist, depths, error_factor=2, plots=False):
"""
Testing whether or not the observations can be attributed to lee waves
"""
S, T, p_ctd, lat, lon = oc.loadCTD(ctd)
# k, l = horizontal_wave_vector_decomposition(Uspec, Vspec)
dshift = doppler_shifts(kh, ladcp)
# Test whether K*U is between N and f
f = (np.nanmean(gsw.f(lat)))
dshiftTest = np.full_like(kh, np.nan)
test_final = np.full_like(kh, np.nan)
for i, dump in enumerate(kh.T):
N2mean = np.nanmean(N2[:,i])
testA = np.abs(dshift[:,i]**2-f**2) <= f**2
        testB = np.abs(dshift[:,i]**2-f**2)
#!/usr/bin/python3
"""
Basic classes and functions.
Note: the import relations must be topologically sortable (no circular imports).
"""
# load packages that should NOT be COPY-ed
import io2 as io
import deap
from deap import algorithms, base, creator, gp, tools
from prettytable import PrettyTable
# COPY #
import copy
import random
import warnings
import sys
import pdb
import inspect
import shutil
import os
import time
import argparse
import datetime
import collections
import traceback
import math
import subprocess
import yaml
import multiprocessing
from itertools import repeat
from functools import partial
from copy import deepcopy
# Disable NumPy's built-in multithreading so a single process uses at most 100% CPU. This code must run before numpy is imported.
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
# load external packages
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
class EvalInfo:
"""currently contains shape and unit information to evaluate."""
def __init__(self, shape, unit, ret_type):
"""shape should be tuple, while unit should be np.ndarray."""
self.shape = shape
self.unit = unit
self.ret_type = ret_type
class Individual:
"""an individual in genetic programming"""
def __init__(self, expr=None, fitness=None, fitness_raw=None, pnl=None, turnover=None):
self.expr = expr
self.fitness = fitness
self.fitness_raw = fitness_raw
self.pnl = pnl
self.turnover = turnover
self.stats = dict()
class IllegalSyntex(Exception):
"""illegal syntax when checking."""
pass
class Array2d:
"""a symbolic class, only used for STGP."""
pass
class Array2dNeutralise:
"""由于中性化参数也是需要根据输入的数据调整的, 因此必须把X_NEUTRALISE作为一个参数传入. 它由这个类代表."""
pass
class Array2dValid:
"""同上. 表示例如UnivTOP4000.valid"""
pass
class Array3d:
"""a symbolic class, only used for STGP."""
pass
class Ephemeral:
"""a class representing ephemeral constants."""
pass
class FinalResult:
"""a class representing the final result, usual generated from ewma."""
pass
# EDIT ###################################################################
# Custom STGP classes and custom functions that need to be COPY-ed can be added here.
# EDIT ###################################################################
def check_same_unit(unit_1, unit_2):
"""check whether two units are numerically similar, by calculating the chebyshev distance."""
epsilon = 0.001
if np.max(np.abs(unit_1 - unit_2)) <= epsilon:
return True
else:
return False
def replace_inf(arr):
ret = arr.copy()
ret[np.isinf(ret)] = np.nan
return ret
def mask(arr):
"""returns a boolean mask of an arr"""
return ~np.isnan(arr)
def imposter(arr):
"""returns an imposter of an arr"""
return np.full_like(arr, np.nan)
def ts_delay(arr, window=1, axis=0):
"""delay by window along an axis. the first/last window rows are filled with nan. """
ret = arr.copy()
if window >= 0: # if window == 0, returns exactly the input
slc1 = [slice(None)] * len(arr.shape)
slc1[axis] = slice(window, arr.shape[axis])
slc2 = [slice(None)] * len(arr.shape)
slc2[axis] = slice(0, arr.shape[axis] - window)
slc3 = [slice(None)] * len(arr.shape)
slc3[axis] = slice(0, window)
ret[tuple(slc1)] = ret[tuple(slc2)]
ret[tuple(slc3)] = np.nan
else: # delay by negative, fetching future data
slc1 = [slice(None)] * len(arr.shape)
slc1[axis] = slice(-window, arr.shape[axis])
slc2 = [slice(None)] * len(arr.shape)
slc2[axis] = slice(0, window)
slc3 = [slice(None)] * len(arr.shape)
slc3[axis] = slice(window, arr.shape[axis])
ret[tuple(slc2)] = ret[tuple(slc1)]
ret[tuple(slc3)] = np.nan
return ret
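# Sketch: delaying a float column by one row pads the top with nan
# (the array must be float so nan can be assigned):
#     >>> ts_delay(np.array([[1.], [2.], [3.]]), window=1)
#     array([[nan],
#            [ 1.],
#            [ 2.]])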
def rolling(arr, window, f, axis=0):
"""
rolling with NumPy and for loop. Note: np.nanxxx is much slower than np.xxx
:param f: a function which accepts array and axis as the first two arguments. e.g. np.nanstd
"""
ret = []
slc = [slice(None)] * len(arr.shape)
for ti in range(arr.shape[axis]):
slc[axis] = slice(max(ti - window + 1, 0), ti + 1)
rolling_data = arr[tuple(slc)]
ret.append(f(rolling_data, axis))
ret = np.stack(ret, axis=axis)
return ret
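# Sketch: a 3-period rolling mean; note that the leading windows are simply
# shorter (expanding) rather than nan-padded:
#     >>> rolling(np.arange(5.), window=3, f=np.nanmean)
#     array([0. , 0.5, 1. , 2. , 3. ])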
def rolling_cross(x, y, window, f, axis):
"""
    rolling function over two arrays
:param f: a function which accepts two arrays and axis as the first three arguments. e.g. cal_pearson_r
"""
ret = []
slc = [slice(None)] * len(x.shape)
for ti in range(x.shape[axis]):
slc[axis] = slice(max(ti - window + 1, 0), ti + 1)
rolling_x = x[tuple(slc)]
rolling_y = y[tuple(slc)]
ret.append(f(rolling_x, rolling_y, axis=axis))
ret = np.stack(ret, axis=axis)
return ret
def ts_quantile_aux(arr, axis, standardize):
"""用于ts_quantile的辅助函数, 会作为f传入rolling中. axis参数不管设成什么都按照0来算. """
arr_rank = (arr[-1, ...][np.newaxis, ...] > arr).sum(0).astype('float')
arr_rank[np.isnan(arr[-1, ...])] = np.nan
if standardize:
arr_rank = arr_rank / mask(arr).sum(0)
return arr_rank
def rank(arr, axis=1, method='average'):
"""rank along an axis, starting at zero. deals with nan. """
ranks = stats.rankdata(arr, method=method, axis=axis).astype('float') # nans are given largest rank
    ranks[np.isnan(arr)] = np.nan  # restore nan (rankdata assigns nans the largest ranks)
return ranks - 1
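# Sketch: zero-based cross-sectional ranks:
#     >>> rank(np.array([[30., 10., 20.]]))
#     array([[2., 0., 1.]])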
#def cal_pearson_r(x, y, axis=0):
# """calculate Pearson correlation coefficient along an axis."""
# x = x.copy() # crucial: otherwise the data is modified in place and nan eventually fills the whole array.
# y = y.copy()
# nanmask = (np.isnan(x) | np.isnan(y)) # make x and y have the same nan values
# x[nanmask] = np.nan
# y[nanmask] = np.nan
# x = x - np.nanmean(x, axis=axis, keepdims=True)
# y = y - np.nanmean(y, axis=axis, keepdims=True)
# result = np.nansum(x * y, axis) / np.sqrt(np.nansum(x ** 2, axis) * np.nansum(y ** 2, axis))
# return result
def cal_pearson_r(x, y, axis=0):
    """Pearson correlation of two column vectors; returns a scalar (nan if fewer than two valid rows)."""
xy = np.hstack((x, y))
isnan = np.isnan(xy).any(axis=1)
_x = x[np.ix_(~isnan)]
_y = y[np.ix_(~isnan)]
if _x.shape[0] < 2:
return np.nan
else:
_x = _x - np.mean(_x, axis=axis, keepdims=True)
_y = _y - np.mean(_y, axis=axis, keepdims=True)
if np.allclose(_x, 0) or np.allclose(_y, 0):
return 0.0
else:
res = np.sum(_x*_y) / np.sqrt(np.sum(_x**2, axis)) / np.sqrt(np.sum(_y**2, axis))
return res[0]
def cal_cov(x, y, axis=0):
"""calculate covariance along an axis."""
    x = x.copy() # crucial: otherwise the data is modified in place and nan eventually fills the whole array.
y = y.copy()
nanmask = (np.isnan(x) | np.isnan(y)) # make x and y have the same nan values
x[nanmask] = np.nan
y[nanmask] = np.nan
x = x - np.nanmean(x, axis=axis, keepdims=True)
y = y - np.nanmean(y, axis=axis, keepdims=True)
result = np.nansum(x * y, axis) / (~nanmask).sum(axis, keepdims=True)
return result
#def load_data_2d(fields, f_load_data):
#    """Load data; f_load_data already carries the start_date information. """
# data = dict()
# for field in fields:
# field_ = field.split('.')[-1]
# data[field_] = f_load_data(field)
# return data
def load_data_2d(fields, f_load_data):
"""读取数据. f_load_data中已经包含了start_date的信息. """
data = dict()
for field in fields:
data[field.replace('.', '_')] = f_load_data(field).to_numpy()
return data
def load_tradedate(field, f_load_data):
return f_load_data(field).index
def alpha_to_weights(alpha):
"""归一化. 最终截面绝对值和为2. """
alpha = alpha - np.nanmean(alpha, axis=1, keepdims=True)
mask_pos = (alpha > 0)
mask_neg = (alpha < 0)
alpha_pos = imposter(alpha)
alpha_pos[mask_pos] = alpha[mask_pos]
alpha_pos = alpha_pos / np.nansum(alpha_pos, 1, keepdims=True)
alpha_neg = imposter(alpha)
alpha_neg[mask_neg] = alpha[mask_neg]
alpha_neg = -alpha_neg / np.nansum(alpha_neg, 1, keepdims=True)
alpha[mask_pos] = alpha_pos[mask_pos]
alpha[mask_neg] = alpha_neg[mask_neg]
return alpha
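# Sketch: after normalisation the positive leg sums to 1 and the negative
# leg to -1, so absolute values sum to 2 per row:
#     >>> alpha_to_weights(np.array([[1., 2., 3., 6.]]))
#     array([[-0.66666667, -0.33333333,  0.        ,  1.        ]])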
# ENDCOPY # functions below are not needed at submission time
# 修改 ###################################################################
# Custom functions that should not or need not be COPY-ed (e.g. ones that may break cython compilation) can be added here.
# 修改 ###################################################################
def get_eval_info(expr, dict_operators, dict_data_eval_info):
"""
tries to get EvalInfo of expr's root node.
    if legal, returns EvalInfo of the root node; otherwise raises an IllegalSyntex exception.
    The original check_syntax was poorly written and caused assorted problems; this is a cleaner version.
    We try hard to avoid eval() here; in fact it should be avoided in all circumstances, so the
    alpha-evaluation code may have to change later as well.
    TODO: the alpha-evaluation code may need updating
    Instead, we exploit the fact that the list is a depth-first traversal and check for syntax
    errors recursively. Starting from the root, compute the EvalInfo at each of its children,
    then check whether those inputs satisfy the operator's syntax at this node.
    EvalInfo at a child is computed the same way, until a node has no arity (every element has
    this attribute); that terminal's EvalInfo is then returned (explicitly defined for data;
    an ephemeral may return anything, since it takes part in no computation).
    To check the syntax itself, we look the node's name up in the supplied operators and call
    the operator directly. The call may raise IllegalSyntex, which keeps propagating outwards
    and must be caught (as in check_syntax).
    TODO: this approach repeats some computation and may be inefficient
:param expr: list, holds tree expression from DFS
"""
if expr[0].arity > 0: # primitive
eval_info_of_subtrees = []
begin = 1
while True:
            # find the subtree; this part comes from gp.PrimitiveTree.searchSubtree
end = begin + 1
total = expr[begin].arity
while total > 0:
total += expr[end].arity - 1
end += 1
            # loop finished: [begin, end) is the subtree's span; end stops right at the next subtree's start, or at the end of the list.
eval_info_of_subtrees.append(get_eval_info(expr[begin: end], dict_operators, dict_data_eval_info))
begin = end
            if end == len(expr):  # reached the end of the list
break
f = dict_operators[expr[0].name][0]
return f(*eval_info_of_subtrees)
else: # terminal, could be data or ephemeral
if expr[0].ret == Ephemeral:
            return expr[0].value  # an Ephemeral returns its own value, since some operators (e.g. SIGNED_POWER) need it to compute the unit
else: # data
return dict_data_eval_info[expr[0].value] # .value returns e.g. 'OPEN', while .name returns 'ARG0'
def check_syntax(expr, dict_operators, dict_data_eval_info):
"""检查一个输入列表expr的句法是否正确."""
try:
eval_info = get_eval_info(expr, dict_operators, dict_data_eval_info)
return True
except IllegalSyntex:
return False
def compare_subtree(expr1, expr2, dict_operators, dict_data_eval_info):
"""
We check whether the return type, shape, and units of two subtrees are the same, before performing crossover or mutation.
"""
if expr1[0].ret != expr2[0].ret: # check return type
return False
eval_info_1 = get_eval_info(expr1, dict_operators, dict_data_eval_info)
eval_info_2 = get_eval_info(expr2, dict_operators, dict_data_eval_info)
# check shape and unit
try:
if eval_info_1.shape != eval_info_2.shape or check_same_unit(eval_info_1.unit, eval_info_2.unit) == False:
return False
return True
except AttributeError:
return False
def find_primitive(pset, return_type, name):
"""find a primitive in pset by name"""
for op in pset.primitives[return_type]:
if op.name == name:
return op
print("Primitive not found!")
return None
def find_terminal(pset, return_type, value=None):
"""find a terminal in pset by name"""
for op in pset.terminals[return_type]:
        # relies on or's short-circuiting: the Ephemeral type has no name or value attribute, so test for Ephemeral first and only then compare value.
if return_type == Ephemeral or op.value == value:
            if inspect.isclass(op):  # the Ephemeral class must be instantiated; the other terminals are already usable as-is.
return op()
else:
return op
print("Terminal not found!")
return None
def cal_pool_corr(pnl, pnl_pool):
"""
    Compute the maximum Pearson correlation between one alpha's pnl and the pool.
    n_days must match, and for consistency with asim both must be 500 days with
    aligned timestamps. NOTE: this relies on a vectorised cal_pearson_r (see the
    commented-out version above); maxcorr_with_pool below is the loop-based equivalent.
    :param pnl: ndarray of shape (n_days,)
    :param pnl_pool: ndarray of shape (n_days, n_alphas)
    """
    # np.broadcast_to is a bit faster than repeat
maxcorr = np.max(cal_pearson_r(np.broadcast_to(pnl[:, np.newaxis], pnl_pool.shape), pnl_pool, axis=0))
return maxcorr
def maxcorr_with_pool(pnl, pnl_pool):
res = []
x = pnl.reshape(len(pnl), 1)
if len(pnl_pool.shape) > 1:
for i in range(pnl_pool.shape[1]):
y = pnl_pool[:, i].reshape(len(pnl), 1)
res.append(cal_pearson_r(x, y))
return max(res)
else:
return cal_pearson_r(x, pnl_pool.reshape(len(pnl), 1))
def expr_to_str(expr):
return str(gp.PrimitiveTree(expr))
def cal_ic_series(alpha, future_returns, ranked=True):
"""
calculate time series of information coefficient
"""
if ranked: # calculate rank IC
alpha = rank(alpha, axis=1)
future_returns = rank(future_returns, axis=1)
ic_series = cal_pearson_r(alpha, future_returns, axis=1)
return ic_series
def cal_pnl_series(alpha, today_returns):
"""
    Compute the pnl series; really just a return series, since it differs only by the (constant) principal.
    :param alpha: a 2-D ndarray whose values can be read as weights, e.g. the positive part and the negative part each sum to 1 on every day.
    :param today_returns: also a 2-D array; the rows aligned with alpha are that day's returns (alpha itself is computed from delayed data).
    :return: the strategy's daily return series.
    """
    return np.nansum(ts_delay(alpha, 1) * today_returns, 1) / 2  # long and short legs, so divide by 2; an all-nan day yields 0.
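# Sketch with two days and two stocks: day 0 has no delayed weights so its
# pnl is 0; day 1 earns (1*0.03 + (-1)*0.00)/2 = 0.015:
#     >>> alpha = np.array([[1., -1.], [1., -1.]])
#     >>> rets = np.array([[0.01, -0.02], [0.03, 0.00]])
#     >>> cal_pnl_series(alpha, rets)
#     array([0.   , 0.015])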
# basic functions used in mutation and crossover
def exception_printer(k, name):
    pass  # prints too much during testing; disabled for now
# if k == 0:
# print(f"=====Catch exception in function {name}=====")
def compare(ind1, ind2, cxpoint1, cxpoint2, dict_operators, dict_data_eval_info):
tree1, tree2 = gp.PrimitiveTree(ind1), gp.PrimitiveTree(ind2)
root1, root2 = ind1[cxpoint1], ind2[cxpoint2]
    # Skip the case where a leaf (terminal) is selected as the "subtree".
    if isinstance(root1, deap.gp.Terminal) and isinstance(root2, deap.gp.Terminal):
        return (False, 0, 0)
slice1, slice2 = tree1.searchSubtree(cxpoint1), tree2.searchSubtree(cxpoint2)
sublst1, sublst2 = ind1[slice1], ind2[slice2]
# Only consider crossover of subtree when its height is greater than 1.
if compare_subtree(sublst1, sublst2, dict_operators, dict_data_eval_info) and \
gp.PrimitiveTree(sublst1).height >= 1 and gp.PrimitiveTree(sublst2).height >= 1:
return (True, len(sublst1), len(sublst2))
else:
return (False, 0, 0)
def pw(text, log):
"""print and write. note that write() does not append newline automatically, and print() is adjusted accordingly."""
print(text, end='')
log.write(text)
def get_population_statistics(population):
"""获取一个种群的统计量. 注意所有个体必须都有fitness."""
fitness_list = np.sort(np.array([individual.fitness for individual in population if not (np.isinf(individual.fitness) or np.isnan(individual.fitness))]))
text = f'population size: {len(population)}, valid individual size: {len(fitness_list)}\n'
if len(fitness_list)==0: return text
statistics = [
np.mean(fitness_list),
np.std(fitness_list),
stats.skew(fitness_list),
stats.kurtosis(fitness_list),
fitness_list[0],
fitness_list[int(len(fitness_list) * 0.25)],
fitness_list[int(len(fitness_list) * 0.5)],
fitness_list[int(len(fitness_list) * 0.75)],
fitness_list[-1]
]
text += 'MEAN:{:4.2f} STD :{:4.2f} SKEW:{:4.2f} KURT:{:4.2f}\n' \
'MIN :{:4.2f} QT25:{:4.2f} QT50:{:4.2f} QT75:{:4.2f} MAX :{:4.2f}\n'.format(*statistics)
return text
def table_population_statistics(population, title):
"""获取一个种群的统计量. 注意所有个体必须都有fitness."""
fitness_list = [(x.fitness, abs(x.fitness_raw)) for x in population if not (np.isinf(x.fitness) or np.isnan(x.fitness))]
fitness_list.sort(key=lambda x:x[0])
fitness_list = np.array(fitness_list)
ttc, vac = len(population), len(fitness_list)
if vac > 0:
q25, q50, q75 = fitness_list[int(vac*0.25), :], fitness_list[int(vac*0.5), :], fitness_list[int(vac*0.75), :]
mm, ss, mi, ma = np.mean(fitness_list, axis=0), np.std(fitness_list, axis=0), fitness_list[0, :], fitness_list[-1, :]
else:
q25, q50, q75 = ([np.nan]*2, ) * 3
        mm, ss, mi, ma = ([np.nan]*2, ) * 4
table = PrettyTable()
table.title = title
table.field_names = ['No.', 'Stats', 'Value1', 'Value2']
table.add_row(['0', 'mean', f'{mm[0]:.2f}', f'{mm[1]:.2f}'])
table.add_row(['1', 'std', f'{ss[0]:.2f}', f'{ss[1]:.2f}'])
table.add_row(['2', 'max', f'{ma[0]:.2f}', f'{ma[1]:.2f}'])
table.add_row(['3', 'Q75', f'{q75[0]:.2f}', f'{q75[1]:.2f}'])
table.add_row(['4', 'Q50', f'{q50[0]:.2f}', f'{q50[1]:.2f}'])
table.add_row(['5', 'Q25', f'{q25[0]:.2f}', f'{q25[1]:.2f}'])
table.add_row(['6', 'min', f'{mi[0]:.2f}', f'{mi[1]:.2f}'])
table.add_row(['7', 'ttCount', f'{ttc:.0f}', f'{ttc:.0f}'])
table.add_row(['8', 'vaCount', f'{vac:.0f}', f'{vac:.0f}'])
return table
def cal_frequent_subtrees(population, hof_num=10):
'''
Calculate frequently appeared subtrees (with certain operations and data).
The output will be used in subtreeMT function. Computationally inexpensive.
'''
all_count = []
for individual in population:
ind1 = individual.expr
tree1 = gp.PrimitiveTree(ind1)
size = len(ind1)
for cxpoint1 in range(size):
t1 = ind1[tree1.searchSubtree(cxpoint1)]
subtree1 = gp.PrimitiveTree(t1)
if subtree1.height > 1:
                # subtrees are lists (unhashable), so store them as tuples for value_counts
                all_count.append(tuple(t1))
    result = pd.value_counts(all_count)
    return [list(t) for t in result.index[0:hof_num]]
def cal_frequent_structure(population, hof_num=10):
'''
Calculate frequently appeared structures (with consecutive certain primitives, but the auxiliary data could be arbitrary.)
'''
all_count = []
for individual in population:
this_count = []
ind1 = individual.expr
size = len(ind1)
for cxpoint1 in range(size):
if isinstance(ind1[cxpoint1], deap.gp.Primitive):
this_count.append(ind1[cxpoint1])
else:
if (len(this_count) > 2):
                    all_count.append(tuple(this_count))  # tuples are hashable for value_counts
                this_count = []
    result = pd.value_counts(all_count)
    return [list(t) for t in result.index[0:hof_num]]
def f_load_data_io(field, data_folder, start_date, end_date):
"""用io的函数读取数据. 这个函数仅用于load_data_2d的f_load_data参数. 另一种f_load_data仅在submit时才定义, 使用self.get_data"""
# 尝试读为2d, 若失败则读为1d数据
data_field = io.read2d_from_asimcache(os.path.join(data_folder, field))[1].to_dataframe()
if data_field is None:
data_field = io.read1d_from_asimcache(os.path.join(data_folder, field))[1].to_dataframe()
if data_field is None:
raise AttributeError(f'{field} not found')
data_field = data_field.loc[start_date: end_date]
return data_field
def safe_regression(X:np.ndarray, Y:np.ndarray):
y, x = Y.reshape(len(Y), 1), X.reshape(len(X), 1)
yx = np.hstack((y, x))
    nanspl = np.isnan(yx)
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pytest
from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField
from pde.fields.base import FieldBase
from pde.grids import CartesianGrid, CylindricalGrid, PolarGrid, SphericalGrid, UnitGrid
from pde.grids.cartesian import CartesianGridBase
from pde.tools.misc import skipUnlessModule
from scipy import ndimage
@pytest.mark.slow
@pytest.mark.parametrize("field_class", [ScalarField, Tensor2Field])
def test_interpolation_natural(example_grid, field_class):
""" test some interpolation for natural boundary conditions """
msg = f"grid={example_grid}, field={field_class}"
f = field_class.random_uniform(example_grid)
if isinstance(example_grid, CartesianGridBase):
p = example_grid.get_random_point(boundary_distance=0.5)
else:
p = example_grid.get_random_point(boundary_distance=1, avoid_center=True)
p = example_grid.point_from_cartesian(p)
i1 = f.interpolate(p, method="scipy_linear")
i2 = f.interpolate(p, method="numba")
np.testing.assert_almost_equal(i1, i2, err_msg=msg)
c = (1,) * len(example_grid.axes) # specific cell
p = f.grid.cell_coords[c]
np.testing.assert_allclose(
f.interpolate(p, method="scipy_linear"), f.data[(Ellipsis,) + c], err_msg=msg
)
np.testing.assert_allclose(
f.interpolate(p, method="numba"), f.data[(Ellipsis,) + c], err_msg=msg
)
@pytest.mark.parametrize("num", [1, 3])
def test_shapes_nfields(num, example_grid):
""" test single component field """
fields = [ScalarField.random_uniform(example_grid) for _ in range(num)]
field = FieldCollection(fields)
data_shape = (num,) + example_grid.shape
np.testing.assert_equal(field.data.shape, data_shape)
for pf_single in field:
np.testing.assert_equal(pf_single.data.shape, example_grid.shape)
field_c = field.copy()
np.testing.assert_allclose(field.data, field_c.data)
assert field.grid == field_c.grid
def test_arithmetics():
""" test simple arithmetics for fields """
grid = UnitGrid([2, 2])
for cls in (ScalarField, VectorField, Tensor2Field):
f1 = cls(grid, data=1)
f2 = cls(grid, data=2)
assert isinstance(str(f1), str)
np.testing.assert_allclose(f1.data, 1)
np.testing.assert_allclose((-f1).data, -1)
# test addition
np.testing.assert_allclose((f1 + 1).data, 2)
np.testing.assert_allclose((1 + f1).data, 2)
f1 += 1
np.testing.assert_allclose(f1.data, 2)
np.testing.assert_allclose((f1 + f2).data, 4)
# test subtraction
np.testing.assert_allclose((f1 - 1).data, 1)
np.testing.assert_allclose((1 - f1).data, -1)
f1 -= 1
np.testing.assert_allclose(f1.data, 1)
np.testing.assert_allclose((f1 - f2).data, -1)
# test multiplication
np.testing.assert_allclose((f1 * 2).data, 2)
np.testing.assert_allclose((2 * f1).data, 2)
f1 *= 2
np.testing.assert_allclose(f1.data, 2)
# test division
np.testing.assert_allclose((f1 / 2).data, 1)
with pytest.raises(TypeError):
np.testing.assert_allclose((2 / f1).data, 1)
f1 /= 2
np.testing.assert_allclose(f1.data, 1)
# test power
f1.data = 2
np.testing.assert_allclose((f1 ** 3).data, 8)
f1 **= 3
np.testing.assert_allclose(f1.data, 8)
# test applying a function
f1.data = 2
np.testing.assert_allclose(f1.apply(lambda x: x ** 3).data, 8)
f1.apply(lambda x: x ** 3, out=f1)
np.testing.assert_allclose(f1.data, 8)
def test_scalar_arithmetics():
""" test simple arithmetics involving scalar fields """
grid = UnitGrid([3, 4])
s = ScalarField(grid, data=2)
v = VectorField.random_uniform(grid)
for f in [v, FieldCollection([v])]:
f.data = s
assert f.data.shape == (2, 3, 4)
np.testing.assert_allclose(f.data, 2)
f += s
np.testing.assert_allclose(f.data, 4)
np.testing.assert_allclose((f + s).data, 6)
np.testing.assert_allclose((s + f).data, 6)
f -= s
np.testing.assert_allclose((f - s).data, 0)
np.testing.assert_allclose((s - f).data, 0)
f *= s
np.testing.assert_allclose(f.data, 4)
np.testing.assert_allclose((f * s).data, 8)
np.testing.assert_allclose((s * f).data, 8)
f /= s
np.testing.assert_allclose((f / s).data, 1)
with pytest.raises(TypeError):
s / f
with pytest.raises(TypeError):
s /= f
with pytest.raises(TypeError):
s *= f
def test_data_management():
""" test how data is set """
grid = UnitGrid([2, 2])
for cls in (ScalarField, VectorField, Tensor2Field):
s1 = cls(grid, data=1)
np.testing.assert_allclose(s1.data, 1)
s2 = cls(grid)
np.testing.assert_allclose(s2.data, 0)
c = FieldCollection([s1, s2])
s1.data = 0
np.testing.assert_allclose(c.data, 0)
c.data = 2
np.testing.assert_allclose(s1.data, 2)
np.testing.assert_allclose(s2.data, 2)
c.data += 1
np.testing.assert_allclose(s1.data, 3)
np.testing.assert_allclose(s2.data, 3)
c[0].data += 2 # reference to s1
c[1].data *= 2 # reference to s2
np.testing.assert_allclose(s1.data, 5)
np.testing.assert_allclose(s2.data, 6)
c[0] = s2
np.testing.assert_allclose(c.data, 6)
# nested collections
with pytest.raises(RuntimeError):
FieldCollection([c])
@skipUnlessModule("h5py")
def test_hdf_input_output(tmp_path):
""" test writing and reading files """
grid = UnitGrid([4, 4])
s = ScalarField.random_uniform(grid, label="scalar")
v = VectorField.random_uniform(grid, label="vector")
t = Tensor2Field.random_uniform(grid, label="tensor")
col = FieldCollection([s, v, t], label="collection")
path = tmp_path / "test_hdf_input_output.hdf5"
for f in [s, v, t, col]:
f.to_file(path)
f2 = FieldBase.from_file(path)
assert f == f2
assert f.label == f2.label
assert isinstance(str(f), str)
assert isinstance(repr(f), str)
@skipUnlessModule("matplotlib")
def test_writing_images(tmp_path):
""" test writing and reading files """
from matplotlib.pyplot import imread
grid = UnitGrid([4, 4])
s = ScalarField.random_uniform(grid, label="scalar")
v = VectorField.random_uniform(grid, label="vector")
t = Tensor2Field.random_uniform(grid, label="tensor")
path = tmp_path / "test_writing_images.png"
for f in [s, v, t]:
f.to_file(path)
# try reading the file
with path.open("br") as fp:
imread(fp)
@pytest.mark.slow
def test_interpolation_to_grid_fields():
""" test whether data is interpolated correctly for different fields """
grid = CartesianGrid([[0, 2 * np.pi]] * 2, 6)
grid2 = CartesianGrid([[0, 2 * np.pi]] * 2, 8)
vf = VectorField.from_expression(grid, ["sin(y)", "cos(x)"])
sf = vf[0] # test extraction of fields
fc = FieldCollection([sf, vf])
for f in [sf, vf, fc]:
f2 = f.interpolate_to_grid(grid2, method="numba")
f3 = f2.interpolate_to_grid(grid, method="numba")
np.testing.assert_allclose(f.data, f3.data, atol=0.2, rtol=0.2)
@pytest.mark.slow
@pytest.mark.parametrize("field_cls", [ScalarField, VectorField, Tensor2Field])
def test_interpolation_values(field_cls):
""" test whether data is interpolated correctly for different fields """
grid = UnitGrid([3, 4])
f = field_cls.random_uniform(grid)
intp = f.make_interpolator("numba")
c = f.grid.cell_coords[2, 2]
np.testing.assert_allclose(intp(c), f.data[..., 2, 2])
with pytest.raises(ValueError):
intp(np.array([100, -100]))
res = f.make_interpolator("numba", fill=45)(np.array([100, -100]))
np.testing.assert_almost_equal(res, np.full(f.data_shape, 45))
@pytest.mark.slow
@pytest.mark.parametrize(
"grid",
[
UnitGrid((6,)),
PolarGrid(6, 4),
SphericalGrid(7, 4),
CylindricalGrid(6, (0, 8), (7, 8)),
],
)
def test_interpolation_to_cartesian(grid):
""" test whether data is interpolated correctly to Cartesian grid """
dim = grid.dim
vf = VectorField(grid, 2)
sf = vf[0] # test extraction of fields
fc = FieldCollection([sf, vf])
# subset
grid_cart = UnitGrid([4] * dim)
for f in [sf, fc]:
res = f.interpolate_to_grid(grid_cart)
np.testing.assert_allclose(res.data, 2)
# superset
grid_cart = UnitGrid([8] * dim)
for f in [sf, fc]:
res = f.interpolate_to_grid(grid_cart, fill=0)
assert res.data.min() == 0
assert res.data.max() == pytest.approx(2)
@pytest.mark.parametrize(
"grid", [PolarGrid(6, 4), SphericalGrid(7, 4), CylindricalGrid(6, (0, 8), (7, 8))]
)
def test_get_cartesian_grid(grid):
""" test whether Cartesian grids can be created """
cart = grid.get_cartesian_grid(mode="valid")
assert cart.volume < grid.volume
cart = grid.get_cartesian_grid(mode="full")
assert cart.volume > grid.volume
@skipUnlessModule("matplotlib")
def test_simple_plotting(example_grid):
""" test simple plotting of various fields on various grids """
vf = VectorField.random_uniform(example_grid)
tf = Tensor2Field.random_uniform(example_grid)
sf = tf[0, 0] # test extraction of fields
fc = FieldCollection([sf, vf])
for f in [sf, vf, tf, fc]:
f.plot(action="close")
f.plot(kind="line", action="close")
if example_grid.dim >= 2:
f.plot(kind="image", action="close")
if isinstance(f, VectorField) and example_grid.dim == 2:
f.plot(kind="quiver", action="close")
f.plot(kind="streamplot", action="close")
def test_random_uniform():
""" test whether random uniform fields behave correctly """
grid = UnitGrid([256, 256])
for field_cls in [ScalarField, VectorField, Tensor2Field]:
a = np.random.random()
b = 2 + np.random.random()
f = field_cls.random_uniform(grid, a, b)
assert np.mean(f.average) == pytest.approx((a + b) / 2, rel=0.02)
assert np.std(f.data) == pytest.approx(0.288675 * (b - a), rel=0.1)
np.testing.assert_allclose(f.real.data, f.data)
np.testing.assert_allclose(f.imag.data, 0)
def test_random_normal():
""" test whether random normal fields behave correctly """
grid = UnitGrid([256, 256])
for field_cls in [ScalarField, VectorField, Tensor2Field]:
m = np.random.random()
s = 1 + np.random.random()
for scaling in ["none", "physical"]:
f = field_cls.random_normal(grid, mean=m, std=s, scaling=scaling)
assert np.mean(f.average) == pytest.approx(m, rel=0.1, abs=0.1)
assert np.std(f.data) == pytest.approx(s, rel=0.1, abs=0.1)
@pytest.mark.parametrize("field_cls", [ScalarField, VectorField, Tensor2Field])
def test_random_colored(field_cls):
""" test whether random colored fields behave correctly """
grid = UnitGrid([128, 128])
exponent = np.random.uniform(-4, 4)
scale = 1 + np.random.random()
f = field_cls.random_colored(grid, exponent=exponent, scale=scale)
assert np.allclose(f.average, 0)
def test_fluctuations():
""" test the scaling of fluctuations """
for dim in [1, 2]:
for size in [256, 512]:
if dim == 1:
size **= 2
grid = CartesianGrid([[0, 1]] * dim, [size] * dim)
std = 1 + np.random.random()
for field_cls in [ScalarField, VectorField, Tensor2Field]:
s = field_cls.random_normal(
grid, mean=np.random.random(), std=std, scaling="physical"
)
expect = np.full([dim] * field_cls.rank, std)
np.testing.assert_allclose(s.fluctuations, expect, rtol=0.1)
def test_smoothing():
""" test smoothing on different grids """
for grid in [
CartesianGrid([[-2, 3]], 4),
UnitGrid(7, periodic=False),
UnitGrid(7, periodic=True),
]:
f1 = ScalarField.random_uniform(grid)
sigma = 0.5 + np.random.random()
# this assumes that the grid periodicity is the same for all axes
mode = "wrap" if grid.periodic[0] else "reflect"
s = sigma / grid.typical_discretization
expected = ndimage.gaussian_filter(f1.data, sigma=s, mode=mode)
out = f1.smooth(sigma)
np.testing.assert_allclose(out.data, expected)
out.data = 0 # reset data
        f1.smooth(sigma, out=out)
        np.testing.assert_allclose(out.data, expected)
from __future__ import annotations
import scipy
import numpy as np
from warnings import warn
from .arrays import ImgArray, PropArray
from .arrays.utils import _docs
from .arrays.utils._corr import subpixel_pcc
from .utils.axesop import *
from .utils.utilcls import Progress
from .utils.deco import dims_to_spatial_axes
from ._cupy import xp, asnumpy, xp_ndi
from ._types import Dims
__all__ = ["fsc", "fourier_shell_correlation", "ncc", "zncc", "fourier_ncc", "fourier_zncc",
"nmi", "pcc_maximum", "ft_pcc_maximum", "pearson_coloc", "manders_coloc"]
@_docs.write_docs
@dims_to_spatial_axes
def fsc(img0: ImgArray,
img1: ImgArray,
nbin: int = 32,
r_max: float = None,
*,
squeeze: bool = True,
dims: Dims = None) -> PropArray:
r"""
Calculate Fourier Shell Correlation (FSC; or Fourier Ring Correlation, FRC, for 2-D images)
between two images. FSC is defined as:
.. math::
        FSC(r) = \frac{Re(\sum_{r<r'<r+dr}[F_0(r') \cdot \bar{F_1}(r')])}
{\sqrt{\sum_{r<r'<r+dr}|F_0(r')|^2 \cdot \sum_{r<r'<r+dr}|F_1(r')|^2}}
Parameters
----------
{inputs_of_correlation}
nbin : int, default is 32
Number of bins.
r_max : float, optional
Maximum radius to make profile. Region 0 <= r < r_max will be split into `nbin` rings
(or shells). **Scale must be considered** because scales of each axis may vary.
{squeeze}
{dims}
Returns
-------
PropArray
FSC stored in x-axis by default. If input images have tzcyx-axes, then an array with
tcx-axes will be returned. Make sure x-axis no longer means length in x because images
are Fourier transformed.
"""
img0, img1 = _check_inputs(img0, img1)
spatial_shape = img0.sizesof(dims)
inds = xp.indices(spatial_shape)
center = [s/2 for s in spatial_shape]
r = xp.sqrt(sum(((x - c)/img0.scale[a])**2 for x, c, a in zip(inds, center, dims)))
r_lim = r.max()
# check r_max
if r_max is None:
r_max = r_lim
elif r_max > r_lim or r_max <= 0:
raise ValueError(f"`r_max` must be in range of 0 < r_max <= {r_lim} with this image.")
with Progress("fsc"):
# make radially separated labels
r_rel = r/r_max
labels = (nbin * r_rel).astype(np.uint16)
labels[r_rel >= 1] = 0
c_axes = complement_axes(dims, img0.axes)
nlabels = int(asnumpy(labels.max()))
out = xp.empty(img0.sizesof(c_axes)+(nlabels,), dtype=xp.float32)
def radial_sum(arr):
arr = xp.asarray(arr)
return xp_ndi.sum_labels(arr, labels=labels, index=xp.arange(1, nlabels+1))
f0 = img0.fft(dims=dims)
f1 = img1.fft(dims=dims)
for sl, f0_, f1_ in iter2(f0, f1, c_axes, exclude=dims):
cov = f0_.real*f1_.real + f0_.imag*f1_.imag
pw0 = f0_.real**2 + f0_.imag**2
pw1 = f1_.real**2 + f1_.imag**2
out[sl] = radial_sum(cov)/xp.sqrt(radial_sum(pw0)*radial_sum(pw1))
if out.ndim == 0 and squeeze:
out = out[()]
out = PropArray(asnumpy(out), dtype=np.float32, axes=c_axes+dims[-1],
dirpath=img0.dirpath, metadata=img0.metadata, propname="fsc")
return out
# alias
fourier_shell_correlation = fsc
def _ncc(img0: ImgArray, img1: ImgArray, dims: Dims):
# Basic Normalized Cross Correlation with batch processing
n = np.prod(img0.sizesof(dims))
if isinstance(dims, str):
dims = tuple(img0.axisof(a) for a in dims)
img0 = xp.asarray(img0)
img1 = xp.asarray(img1)
corr = xp.sum(img0 * img1, axis=dims) / (
xp.std(img0, axis=dims)*xp.std(img1, axis=dims)) / n
return asnumpy(corr)
def _masked_ncc(img0: ImgArray, img1: ImgArray, dims: Dims, mask: ImgArray):
if mask.ndim < img0.ndim:
mask = add_axes(img0.axes, img0.shape, mask, mask.axes)
n = np.prod(img0.sizesof(dims))
img0ma = np.ma.array(img0.value, mask=mask)
img1ma = np.ma.array(img1.value, mask=mask)
axis = tuple(img0.axisof(a) for a in dims)
return np.ma.sum(img0ma * img1ma, axis=axis) / (
np.ma.std(img0ma, axis=axis)*np.ma.std(img1ma, axis=axis)) / n
def _zncc(img0: ImgArray, img1: ImgArray, dims: Dims):
# Basic Zero-Normalized Cross Correlation with batch processing.
# Inputs must be already zero-normalized.
if isinstance(dims, str):
dims = tuple(img0.axisof(a) for a in dims)
img0 = xp.asarray(img0)
img1 = xp.asarray(img1)
corr = xp.sum(img0 * img1, axis=dims) / (
xp.sqrt(xp.sum(img0**2, axis=dims)*xp.sum(img1**2, axis=dims)))
return asnumpy(corr)
def _masked_zncc(img0: ImgArray, img1: ImgArray, dims: Dims, mask: ImgArray):
if mask.ndim < img0.ndim:
mask = add_axes(img0.axes, img0.shape, mask, mask.axes)
img0ma = np.ma.array(img0.value, mask=mask)
img1ma = np.ma.array(img1.value, mask=mask)
axis = tuple(img0.axisof(a) for a in dims)
return np.sum(img0ma * img1ma, axis=axis) / (
np.sqrt(np.sum(img0ma**2, axis=axis)*np.sum(img1ma**2, axis=axis)))
@_docs.write_docs
@dims_to_spatial_axes
def ncc(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
"""
Normalized Cross Correlation.
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
with Progress("ncc"):
img0, img1 = _check_inputs(img0, img1)
if mask is None:
corr = _ncc(img0, img1, dims)
else:
corr = _masked_ncc(img0, img1, dims, mask)
return _make_corr_output(corr, img0, "ncc", squeeze, dims)
@_docs.write_docs
@dims_to_spatial_axes
def zncc(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
"""
Zero-Normalized Cross Correlation.
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
with Progress("zncc"):
img0, img1 = _check_inputs(img0, img1)
img0zn = img0 - np.mean(img0, axis=dims, keepdims=True)
img1zn = img1 - np.mean(img1, axis=dims, keepdims=True)
if mask is None:
corr = _zncc(img0zn, img1zn, dims)
else:
corr = _masked_zncc(img0zn, img1zn, dims, mask)
return _make_corr_output(corr, img0, "zncc", squeeze, dims)
# alias
pearson_coloc = zncc
@_docs.write_docs
@dims_to_spatial_axes
def nmi(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
bins: int = 100,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
r"""
Normalized Mutual Information.
:math:`Y(A, B) = \frac{H(A) + H(B)}{H(A, B)}`
See "Elegant SciPy"
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
bins : int, default is 100
Number of bins to construct histograms.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
from scipy.stats import entropy
img0, img1 = _check_inputs(img0, img1)
c_axes = complement_axes(dims, img0.axes)
out = np.empty(img0.sizesof(c_axes), dtype=np.float32)
    if mask is None:
        # default to using every pixel; note that here `mask` selects pixels to keep
        mask = np.ones(img0.shape, dtype=bool)
    elif mask.ndim < img0.ndim:
        mask = add_axes(img0.axes, img0.shape, mask, mask.axes)
for sl, img0_, img1_ in iter2(img0, img1, c_axes):
mask_ = mask[sl]
hist, edges = np.histogramdd([np.ravel(img0_[mask_]),
np.ravel(img1_[mask_])], bins=bins)
hist /= np.sum(hist)
e1 = entropy(np.sum(hist, axis=0)) # Shannon entropy
e2 = entropy(np.sum(hist, axis=1))
e12 = entropy(np.ravel(hist)) # mutual entropy
out[sl] = (e1 + e2)/e12
return _make_corr_output(out, img0, "nmi", squeeze, dims)
@_docs.write_docs
@dims_to_spatial_axes
def fourier_ncc(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
"""
Normalized Cross Correlation in Fourier space.
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
with Progress("fourier_ncc"):
img0, img1 = _check_inputs(img0, img1)
f0 = np.sqrt(img0.power_spectra(dims=dims, zero_norm=True))
f1 = np.sqrt(img1.power_spectra(dims=dims, zero_norm=True))
if mask is None:
corr = _ncc(f0, f1, dims)
else:
corr = _masked_ncc(f0, f1, dims, mask)
return _make_corr_output(corr, img0, "fourier_ncc", squeeze, dims)
@_docs.write_docs
@dims_to_spatial_axes
def fourier_zncc(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
squeeze: bool = True,
*,
dims: Dims = None) -> PropArray | float:
"""
Zero-Normalized Cross Correlation in Fourier space.
Parameters
----------
{inputs_of_correlation}
mask : boolean ImgArray, optional
If provided, True regions will be masked and will not be taken into account when calculate
correlation.
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation value(s).
"""
with Progress("fourier_zncc"):
img0, img1 = _check_inputs(img0, img1)
f0 = np.sqrt(img0.power_spectra(dims=dims, zero_norm=True))
f1 = np.sqrt(img1.power_spectra(dims=dims, zero_norm=True))
f0 -= np.mean(f0, axis=dims, keepdims=True)
f1 -= np.mean(f1, axis=dims, keepdims=True)
if mask is None:
corr = _zncc(f0, f1, dims)
else:
corr = _masked_zncc(f0, f1, dims, mask)
return _make_corr_output(corr, img0, "fourier_zncc", squeeze, dims)
@_docs.write_docs
def pcc_maximum(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
upsample_factor: int = 10) -> np.ndarray:
"""
Calculate lateral shift between two images. Same as ``skimage.registration.phase_cross_correlation``.
Parameters
----------
{inputs_of_correlation}
upsample_factor : int, default is 10
Up-sampling factor when calculating phase cross correlation.
Returns
-------
np.ndarray
Shift in pixel.
"""
if img0 is img1:
return np.zeros(img0.ndim)
with Progress("pcc_maximum"):
img0, img1 = _check_inputs(img0, img1)
ft0 = img0.fft(dims=img0.axes)
ft1 = img1.fft(dims=img1.axes)
if mask is not None:
ft0[mask] = 0
shift = subpixel_pcc(xp.asarray(ft0.value), xp.asarray(ft1.value), upsample_factor)
return asnumpy(shift)
@_docs.write_docs
def ft_pcc_maximum(img0: ImgArray,
img1: ImgArray,
mask: ImgArray | None = None,
upsample_factor: int = 10) -> np.ndarray:
"""
Calculate lateral shift between two images.
This function takes Fourier transformed images as input. If you have to repetitively
use a same template image, this function is faster.
Parameters
----------
{inputs_of_correlation}
upsample_factor : int, default is 10
Up-sampling factor when calculating phase cross correlation.
Returns
-------
np.ndarray
Shift in pixel.
"""
with Progress("ft_pcc_maximum"):
_check_dimensions(img0, img1)
if mask is not None:
img0 = img0.copy()
img0[mask] = 0
shift = subpixel_pcc(xp.asarray(img0.value), xp.asarray(img1.value), upsample_factor)
return asnumpy(shift)
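# Sketch of the intended workflow (``template`` and ``frames`` are hypothetical
# ImgArray objects): Fourier-transform the template once and reuse it.
#     >>> ft_ref = template.fft(dims=template.axes)
#     >>> shifts = [ft_pcc_maximum(ft_ref, frame.fft(dims=frame.axes))
#     ...           for frame in frames]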
@_docs.write_docs
@dims_to_spatial_axes
def manders_coloc(img0: ImgArray,
img1: np.ndarray,
*,
squeeze: bool = True,
dims: Dims = None) -> PropArray | float:
r"""
Manders' correlation coefficient. This is defined as following:
:math:`r = \frac{\sum_{i \in I_{ref}} I_i}{\sum_{i} I_i}`
    This value is NOT independent of background intensity. You need to correctly subtract
    the background from ``img0`` first. This value is NOT interchangeable between channels.
Parameters
----------
{inputs_of_correlation}
{squeeze}
{dims}
Returns
-------
PropArray or float
Correlation coefficient(s).
"""
if img1.dtype != bool:
raise TypeError("`ref` must be a binary image.")
if img0.shape != img1.shape:
raise ValueError(f"Shape mismatch. `img0` has shape {img0.shape} but `img1` "
f"has shape {img1.shape}")
if img0.axes != img1.axes:
warn(f"Axes mismatch. `img0` has axes {img0.axes} but `img1` has axes {img1.axes}. "
"Result may be wrong due to this mismatch.", UserWarning)
img0 = img0.as_float()
total = np.sum(img0, axis=dims)
img0 = img0.copy()
img0[~img1] = 0
    coeff = np.sum(img0, axis=dims)
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.genmod.families import links
from tabulate import tabulate
from zepid.calc.utils import (risk_ci, incidence_rate_ci, risk_ratio, risk_difference, number_needed_to_treat,
odds_ratio, incidence_rate_difference, incidence_rate_ratio, sensitivity, specificity)
#########################################################################################################
# Measures of effect / association
#########################################################################################################
class RiskRatio:
r"""Estimate of Risk Ratio with a (1-alpha)*100% Confidence interval from a pandas DataFrame. Missing data is
ignored. Exposure categories should be mutually exclusive
Risk ratio is calculated from
.. math::
RR = \frac{\Pr(Y|A=1)}{\Pr(Y|A=0)}
Risk ratio standard error is
.. math::
SE = \left(\frac{1}{a} - \frac{1}{a + b} + \frac{1}{c} - \frac{1}{c + d}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works supports binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the risk ratio in a data set
>>> from zepid import RiskRatio, load_sample_data
>>> df = load_sample_data(False)
>>> rr = RiskRatio()
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.summary()
Calculate the risk ratio with exposure of '1' as the reference category
>>> rr = RiskRatio(reference=1)
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.summary()
Generate a plot of the calculated risk ratio(s)
>>> import matplotlib.pyplot as plt
>>> rr = RiskRatio()
>>> rr.fit(df, exposure='art', outcome='dead')
>>> rr.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.risks = []
self.risk_ratio = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the Risk Ratio given a data set
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
risk_lcl = []
risk_ucl = []
risk_sd = []
rr_lcl = []
rr_ucl = []
rr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = risk_ci(events=self._c, total=(self._c + self._d), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
self.risk_ratio.append(1)
rr_lcl.append(None)
rr_ucl.append(None)
rr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
ri, lr, ur, sd, *_ = risk_ci(events=a, total=(a+b), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
em, lcl, ucl, sd, *_ = risk_ratio(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.risk_ratio.append(em)
rr_lcl.append(lcl)
rr_ucl.append(ucl)
rr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['Risk'] = self.risks
rf['SD(Risk)'] = risk_sd
rf['Risk_LCL'] = risk_lcl
rf['Risk_UCL'] = risk_ucl
rf['RiskRatio'] = self.risk_ratio
rf['SD(RR)'] = rr_sd
rf['RR_LCL'] = rr_lcl
rf['RR_UCL'] = rr_ucl
rf['CLR'] = rf['RR_UCL'] / rf['RR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Risk Ratio ')
print('======================================================================')
print(self.results[['Risk', 'SD(Risk)', 'Risk_LCL', 'Risk_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskRatio', 'SD(RR)', 'RR_LCL', 'RR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, measure='risk_ratio', scale='linear', center=1, **errorbar_kwargs):
"""Plot the risk ratios or the risks along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display risk ratios or risks. Default is to display the risk ratio. Options are;
* 'risk_ratio' : display risk ratios
* 'risk' : display risks
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
center : str, optional
Sets a reference line. For the risk ratio, the reference line defaults to 1. For risks, no reference line is
displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See
defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'risk_ratio':
ax = _plotter(estimate=self.results['RiskRatio'], lcl=self.results['RR_LCL'], ucl=self.results['RR_UCL'],
labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Risk Ratio')
elif measure == 'risk':
ax = _plotter(estimate=self.results['Risk'], lcl=self.results['Risk_LCL'], ucl=self.results['Risk_UCL'],
labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Risk')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "risk_ratio" or "risk" for plots')
return ax
class RiskDifference:
r"""Estimate of Risk Difference with a (1-alpha)*100% Confidence interval from a pandas DataFrame. Missing data is
ignored. Exposure categories should be mutually exclusive
Risk difference is calculated as
.. math::
RD = \Pr(Y|A=1) - \Pr(Y|A=0)
Risk difference standard error is calculated as
.. math::
SE = \left(\frac{R_1 \times (1 - R_1)}{a+b} + \frac{R_0 \times (1-R_0)}{c+d}\right)^{\frac{1}{2}}
In addition to confidence intervals, the Frechet bounds are calculated as well. These probability bounds are useful
for a comparison. Within these bounds, the true causal risk difference in the sample must live. The only
assumptions these bounds require are no measurement error, causal consistency, no selection bias, and any missing
data is MCAR. These bounds are always unit width (width of one), but they do not require any assumptions regarding
confounding / conditional exchangeability. They are calculated via the following formula
.. math::
Lower = \Pr(Y|A=a)\Pr(A=a) - \Pr(Y|A \ne a)\Pr(A \ne a) - \Pr(A=a)\\
Upper = \Pr(Y|A=a)\Pr(A=a) + \Pr(A \ne a) - \Pr(Y|A \ne a)\Pr(A \ne a)
For further details on these bounds, see the references
Note
----
Outcome must be coded as (1: yes, 0:no). Only supports binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
References
----------
Cole SR et al. (2019) Nonparametric Bounds for the Risk Function. American Journal of Epidemiology. 188(4), 632-636
Examples
--------
Calculate the risk difference in a data set
>>> from zepid import RiskDifference, load_sample_data
>>> df = load_sample_data(False)
>>> rd = RiskDifference()
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.summary()
Calculate the risk difference with exposure of '1' as the reference category
>>> rd = RiskDifference(reference=1)
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.summary()
Generate a plot of the calculated risk difference(s)
>>> import matplotlib.pyplot as plt
>>> rd = RiskDifference()
>>> rd.fit(df, exposure='art', outcome='dead')
>>> rd.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.risks = []
self.risk_difference = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self.n = None
def fit(self, df, exposure, outcome):
"""Calculates the Risk Difference
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
n = df.dropna(subset=[exposure, outcome]).shape[0]
# Setting up holders for results
risk_lcl = []
risk_ucl = []
risk_sd = []
rd_lcl = []
rd_ucl = []
rd_sd = []
fr_lower = []
fr_upper = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:' + str(self.reference))
ri, lr, ur, sd, *_ = risk_ci(events=self._c, total=(self._c + self._d), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
self.risk_difference.append(0)
rd_lcl.append(None)
rd_ucl.append(None)
rd_sd.append(None)
fr_lower.append(None)
fr_upper.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
ri, lr, ur, sd, *_ = risk_ci(events=a, total=(a + b), alpha=self.alpha)
self.risks.append(ri)
risk_lcl.append(lr)
risk_ucl.append(ur)
risk_sd.append(sd)
em, lcl, ucl, sd, *_ = risk_difference(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.risk_difference.append(em)
rd_lcl.append(lcl)
rd_ucl.append(ucl)
rd_sd.append(sd)
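# Frechet probability bounds for the causal risk difference (Cole et al. 2019);
# always unit width and requiring no exchangeability assumptions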
fr_lower.append(ri*((a+b)/n) - (1-ri)*(1 - (a+b)/n) - ((a+b)/n))
fr_upper.append(ri*((a+b)/n) + (1 - (a+b)/n) - (1-ri)*(1 - (a+b)/n))
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
self.n = n
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['Risk'] = self.risks
rf['SD(Risk)'] = risk_sd
rf['Risk_LCL'] = risk_lcl
rf['Risk_UCL'] = risk_ucl
rf['RiskDifference'] = self.risk_difference
rf['SD(RD)'] = rd_sd
rf['RD_LCL'] = rd_lcl
rf['RD_UCL'] = rd_ucl
rf['CLD'] = rf['RD_UCL'] - rf['RD_LCL']
rf['LowerBound'] = fr_lower
rf['UpperBound'] = fr_upper
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Risk Difference ')
print('======================================================================')
print(self.results[['Risk', 'SD(Risk)', 'Risk_LCL', 'Risk_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskDifference', 'SD(RD)', 'RD_LCL', 'RD_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['RiskDifference', 'CLD', 'LowerBound', 'UpperBound']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, measure='risk_difference', center=0, **errorbar_kwargs):
"""Plot the risk differences or the risks along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display risk differences or risks. Default is to display the risk difference. Options are;
* 'risk_difference' : display risk differences
* 'risk' : display risks
center : float, optional
Sets a reference line. For the risk difference, the reference line defaults to 0. For risks, no reference
line is displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See
defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'risk_difference':
ax = _plotter(estimate=self.results['RiskDifference'], lcl=self.results['RD_LCL'],
ucl=self.results['RD_UCL'], labels=self.results.index,
center=center, **errorbar_kwargs)
ax.set_title('Risk Difference')
elif measure == 'risk':
ax = _plotter(estimate=self.results['Risk'], lcl=self.results['Risk_LCL'], ucl=self.results['Risk_UCL'],
labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Risk')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "risk_difference" or "risk" for plots')
return ax
class NNT:
r"""Estimates of Number Needed to Treat. NNT (1-alpha)*100% confidence interval presentation is based on
Altman, DG (BMJ 1998). Missing data is ignored
Number needed to treat is calculated as
.. math::
NNT = \frac{1}{RD}
Risk difference and the corresponding confidence intervals come from
.. math::
RD = \Pr(Y|A=1) - \Pr(Y|A=0)
Risk difference standard error is calculated as
.. math::
SE = \left(\frac{R_1 \times (1 - R_1)}{a+b} + \frac{R_0 \times (1-R_0)}{c+d}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the number needed to treat in a data set
>>> from zepid import NNT, load_sample_data
>>> df = load_sample_data(False)
>>> nnt = NNT()
>>> nnt.fit(df, exposure='art', outcome='dead')
>>> nnt.summary()
Calculate the number needed to treat with '1' as the reference category
>>> nnt = NNT(reference=1)
>>> nnt.fit(df, exposure='art', outcome='dead')
>>> nnt.summary()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.number_needed_to_treat = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the NNT
Parameters
------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
nnt_lcl = []
nnt_ucl = []
nnt_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:' + str(self.reference))
self.number_needed_to_treat.append(np.inf)
nnt_lcl.append(None)
nnt_ucl.append(None)
nnt_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
em, lcl, ucl, sd, *_ = number_needed_to_treat(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.number_needed_to_treat.append(em)
nnt_lcl.append(lcl)
nnt_ucl.append(ucl)
nnt_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['NNT'] = self.number_needed_to_treat
rf['SD(RD)'] = nnt_sd
rf['NNT_LCL'] = nnt_lcl
rf['NNT_UCL'] = nnt_ucl
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for i, r in self.results.iterrows():
if i == self._labels[0]:
pass
else:
print('======================================================================')
print(' Number Needed to Treat/Harm ')
print('======================================================================')
if r['NNT'] == np.inf:
print('Number Needed to Treat = infinite')
else:
if r['NNT'] > 0:
print('Number Needed to Harm: ', round(abs(r['NNT']), decimal))
if r['NNT'] < 0:
print('Number Needed to Treat: ', round(abs(r['NNT']), decimal))
print('----------------------------------------------------------------------')
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: ')
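# Altman (1998): when the RD confidence interval crosses the null, the
# interval is reported as NNT to infinity to NNH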
if r['NNT_LCL'] < 0 < r['NNT_UCL']:
print('NNT ', round(abs(r['NNT_LCL']), decimal), 'to infinity to NNH ',
round(abs(r['NNT_UCL']), decimal))
elif 0 < r['NNT_LCL']:
print('NNT ', round(abs(r['NNT_LCL']), decimal), ' to ', round(abs(r['NNT_UCL']), decimal))
else:
print('NNH ', round(abs(r['NNT_LCL']), decimal), ' to ', round(abs(r['NNT_UCL']), decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
class OddsRatio:
r"""Estimates of Odds Ratio with a (1-alpha)*100% Confidence interval. Missing data is ignored
Odds ratio is calculated from
.. math::
OR = \frac{\Pr(Y|A=1)}{1 - \Pr(Y|A=1)} / \frac{\Pr(Y|A=0)}{1 - \Pr(Y|A=0)}
Odds ratio standard error is
.. math::
SE = \left(\frac{1}{a} + \frac{1}{b} + \frac{1}{c} + \frac{1}{d}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
---------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the odds ratio in a data set
>>> from zepid import OddsRatio, load_sample_data
>>> df = load_sample_data(False)
>>> ort = OddsRatio()
>>> ort.fit(df, exposure='art', outcome='dead')
>>> ort.summary()
Calculate the odds ratio with exposure of '1' as the reference category
>>> ort = OddsRatio(reference=1)
>>> ort.fit(df, exposure='art', outcome='dead')
>>> ort.summary()
Generate a plot of the calculated odds ratio(s)
>>> import matplotlib.pyplot as plt
>>> ort = OddsRatio()
>>> ort.fit(df, exposure='art', outcome='dead')
>>> ort.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.odds_ratio = []
self.results = None
self._a_list = []
self._b_list = []
self._c = None
self._d = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
def fit(self, df, exposure, outcome):
"""Calculates the Odds Ratio
Parameters
---------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
"""
# Setting up holders for results
odr_lcl = []
odr_ucl = []
odr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._d = df.loc[(df[exposure] == self.reference) & (df[outcome] == 0)].shape[0]
self._labels.append('Ref:'+str(self.reference))
self.odds_ratio.append(1)
odr_lcl.append(None)
odr_ucl.append(None)
odr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
b = df.loc[(df[exposure] == i) & (df[outcome] == 0)].shape[0]
self._b_list.append(b)
em, lcl, ucl, sd, *_ = odds_ratio(a=a, b=b, c=self._c, d=self._d, alpha=self.alpha)
self.odds_ratio.append(em)
odr_lcl.append(lcl)
odr_ucl.append(ucl)
odr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['OddsRatio'] = self.odds_ratio
rf['SD(OR)'] = odr_sd
rf['OR_LCL'] = odr_lcl
rf['OR_UCL'] = odr_ucl
rf['CLR'] = rf['OR_UCL'] / rf['OR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
---------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, b, l in zip(self._a_list, self._b_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, b], ['E=0', self._c, self._d]], headers=['', 'D=1', 'D=0'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Odds Ratio ')
print('======================================================================')
print(self.results[['OddsRatio', 'SD(OR)', 'OR_LCL', 'OR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('======================================================================')
def plot(self, scale='linear', center=1, **errorbar_kwargs):
"""Plot the odds ratios along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
center : float, optional
Sets a reference line. The reference line defaults to 1.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See defaults here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
ax = _plotter(estimate=self.results['OddsRatio'], lcl=self.results['OR_LCL'], ucl=self.results['OR_UCL'],
labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Odds Ratio')
return ax
class IncidenceRateRatio:
r"""Estimates of Incidence Rate Ratio with a (1-alpha)*100% Confidence interval. Missing data is ignored
Incidence rate ratio is calculated from
.. math::
IR = \frac{a}{t_1} / \frac{c}{t_0}
Incidence rate ratio standard error is
.. math::
SE = \left(\frac{1}{a} + \frac{1}{c}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
------------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the incidence rate ratio in a data set
>>> from zepid import IncidenceRateRatio, load_sample_data
>>> df = load_sample_data(False)
>>> irr = IncidenceRateRatio()
>>> irr.fit(df, exposure='art', outcome='dead', time='t')
>>> irr.summary()
Calculate the incidence rate ratio with exposure of '1' as the reference category
>>> irr = IncidenceRateRatio(reference=1)
>>> irr.fit(df, exposure='art', outcome='dead', time='t')
>>> irr.summary()
Generate a plot of the calculated incidence rate ratio(s)
>>> import matplotlib.pyplot as plt
>>> irr = IncidenceRateRatio()
>>> irr.fit(df, exposure='art', outcome='dead', time='t')
>>> irr.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.incidence_rate = []
self.incidence_rate_ratio = []
self.results = None
self._a_list = []
self._a_time_list = []
self._c = None
self._c_time = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self._missing_t = None
def fit(self, df, exposure, outcome, time):
"""Calculate the Incidence Rate Ratio
Parameters
------------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable
outcome : string
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
time : string
Column name of time contributed
"""
# Setting up holders for results
ir_lcl = []
ir_ucl = []
ir_sd = []
irr_lcl = []
irr_ucl = []
irr_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._c_time = df.loc[df[exposure] == self.reference][time].sum()
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = incidence_rate_ci(events=self._c, time=self._c_time, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
self.incidence_rate_ratio.append(1)
irr_lcl.append(None)
irr_ucl.append(None)
irr_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
a_t = df.loc[df[exposure] == i][time].sum()
self._a_time_list.append(a_t)
ri, lr, ur, sd, *_ = incidence_rate_ci(events=a, time=a_t, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
em, lcl, ucl, sd, *_ = incidence_rate_ratio(a=a, t1=a_t, c=self._c, t2=self._c_time, alpha=self.alpha)
self.incidence_rate_ratio.append(em)
irr_lcl.append(lcl)
irr_ucl.append(ucl)
irr_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
self._missing_t = df.loc[df[time].isnull()].shape[0]
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['IncRate'] = self.incidence_rate
rf['SD(IncRate)'] = ir_sd
rf['IncRate_LCL'] = ir_lcl
rf['IncRate_UCL'] = ir_ucl
rf['IncRateRatio'] = self.incidence_rate_ratio
rf['SD(IRR)'] = irr_sd
rf['IRR_LCL'] = irr_lcl
rf['IRR_UCL'] = irr_ucl
rf['CLR'] = rf['IRR_UCL'] / rf['IRR_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
------------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, a_t, l in zip(self._a_list, self._a_time_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, a_t], ['E=0', self._c, self._c_time]], headers=['', 'D=1', 'Person-time'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Incidence Rate Ratio ')
print('======================================================================')
print(self.results[['IncRate', 'SD(IncRate)', 'IncRate_LCL', 'IncRate_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['IncRateRatio', 'SD(IRR)', 'IRR_LCL', 'IRR_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('Missing T: ', self._missing_t)
print('======================================================================')
def plot(self, measure='incidence_rate_ratio', scale='linear', center=1, **errorbar_kwargs):
"""Plot the risk ratios or the risks along with their corresponding confidence intervals. This option is an
alternative to `summary()`, which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display incidence rate ratios or incidence rates. Default is to display the incidence rate ratio.
Options are;
* 'incidence_rate_ratio' : display incidence rate ratios
* 'incidence_rate' : display incidence rates
scale : str, optional
Scale for the x-axis. Default is a linear scale. A log-scale can be requested by setting scale='log'
center : float, optional
Sets a reference line. For the incidence rate ratio, the reference line defaults to 1. For incidence rates,
no reference line is displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See defaults here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'incidence_rate_ratio':
ax = _plotter(estimate=self.results['IncRateRatio'], lcl=self.results['IRR_LCL'],
ucl=self.results['IRR_UCL'], labels=self.results.index,
center=center, **errorbar_kwargs)
if scale == 'log':
ax.set_xscale('log')
ax.set_title('Incidence Rate Ratio')
elif measure == 'incidence_rate':
ax = _plotter(estimate=self.results['IncRate'], lcl=self.results['IncRate_LCL'],
ucl=self.results['IncRate_UCL'], labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Incidence Rate')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "incidence_rate_ratio" or "incidence_rate" for plots')
return ax
class IncidenceRateDifference:
r"""Estimates of Incidence Rate Difference with a (1-alpha)*100% Confidence interval. Missing data is ignored.
Incidence rate difference is calculated from
.. math::
ID = \frac{a}{t_1} - \frac{c}{t_0}
Incidence rate difference standard error is
.. math::
SE = \left(\frac{a}{t_1^2} + \frac{c}{t_0^2}\right)^{\frac{1}{2}}
Note
----
Outcome must be coded as (1: yes, 0:no). Only works for binary outcomes
Parameters
----------------
reference : integer, optional
Reference category for comparisons. Default reference category is 0
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the incidence rate difference in a data set
>>> from zepid import IncidenceRateDifference, load_sample_data
>>> df = load_sample_data(False)
>>> ird = IncidenceRateDifference()
>>> ird.fit(df, exposure='art', outcome='dead', time='t')
>>> ird.summary()
Calculate the incidence rate difference with exposure of '1' as the reference category
>>> ird = IncidenceRateDifference(reference=1)
>>> ird.fit(df, exposure='art', outcome='dead', time='t')
>>> ird.summary()
Generate a plot of the calculated incidence rate difference(s)
>>> import matplotlib.pyplot as plt
>>> ird = IncidenceRateDifference()
>>> ird.fit(df, exposure='art', outcome='dead', time='t')
>>> ird.plot()
>>> plt.show()
"""
def __init__(self, reference=0, alpha=0.05):
self.reference = reference
self.alpha = alpha
self.incidence_rate = []
self.incidence_rate_difference = []
self.results = None
self._a_list = []
self._a_time_list = []
self._c = None
self._c_time = None
self._labels = []
self._fit = False
self._missing_e = None
self._missing_d = None
self._missing_ed = None
self._missing_t = None
def fit(self, df, exposure, outcome, time):
"""Calculates the Incidence Rate Difference
Parameters
----------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : str
Column name of exposure variable
outcome : str
Column name of outcome variable. Must be coded as binary (0,1) where 1 is the outcome of interest
time : str
Column name of time variable
"""
# Setting up holders for results
ir_lcl = []
ir_ucl = []
ir_sd = []
ird_lcl = []
ird_ucl = []
ird_sd = []
# Getting unique values and dropping reference
vals = set(df[exposure].dropna().unique())
vals.remove(self.reference)
self._c = df.loc[(df[exposure] == self.reference) & (df[outcome] == 1)].shape[0]
self._c_time = df.loc[df[exposure] == self.reference][time].sum()
self._labels.append('Ref:'+str(self.reference))
ri, lr, ur, sd, *_ = incidence_rate_ci(events=self._c, time=self._c_time, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
self.incidence_rate_difference.append(0)
ird_lcl.append(None)
ird_ucl.append(None)
ird_sd.append(None)
# Going through all the values
for i in vals:
self._labels.append(str(i))
a = df.loc[(df[exposure] == i) & (df[outcome] == 1)].shape[0]
self._a_list.append(a)
a_t = df.loc[df[exposure] == i][time].sum()
self._a_time_list.append(a_t)
ri, lr, ur, sd, *_ = incidence_rate_ci(events=a, time=a_t, alpha=self.alpha)
self.incidence_rate.append(ri)
ir_lcl.append(lr)
ir_ucl.append(ur)
ir_sd.append(sd)
em, lcl, ucl, sd, *_ = incidence_rate_difference(a=a, t1=a_t, c=self._c, t2=self._c_time, alpha=self.alpha)
self.incidence_rate_difference.append(em)
ird_lcl.append(lcl)
ird_ucl.append(ucl)
ird_sd.append(sd)
# Getting the extent of missing data
self._missing_ed = df.loc[(df[exposure].isnull()) & (df[outcome].isnull())].shape[0]
self._missing_e = df.loc[df[exposure].isnull()].shape[0] - self._missing_ed
self._missing_d = df.loc[df[outcome].isnull()].shape[0] - self._missing_ed
self._missing_t = df.loc[df[time].isnull()].shape[0]
# Setting up results
rf = pd.DataFrame(index=self._labels)
rf['IncRate'] = self.incidence_rate
rf['SD(IncRate)'] = ir_sd
rf['IncRate_LCL'] = ir_lcl
rf['IncRate_UCL'] = ir_ucl
rf['IncRateDiff'] = self.incidence_rate_difference
rf['SD(IRD)'] = ird_sd
rf['IRD_LCL'] = ird_lcl
rf['IRD_UCL'] = ird_ucl
rf['CLD'] = rf['IRD_UCL'] - rf['IRD_LCL']
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
----------------
decimal : integer, optional
Decimal places to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
for a, a_t, l in zip(self._a_list, self._a_time_list, self._labels):
print('Comparison:'+str(self.reference)+' to '+self._labels[self._labels.index(l)+1])
print(tabulate([['E=1', a, a_t], ['E=0', self._c, self._c_time]], headers=['', 'D=1', 'Person-time'],
tablefmt='grid'), '\n')
print('======================================================================')
print(' Incidence Rate Difference ')
print('======================================================================')
print(self.results[['IncRate', 'SD(IncRate)', 'IncRate_LCL', 'IncRate_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print(self.results[['IncRateDiff', 'SD(IRD)', 'IRD_LCL', 'IRD_UCL']].round(decimals=decimal))
print('----------------------------------------------------------------------')
print('Missing E: ', self._missing_e)
print('Missing D: ', self._missing_d)
print('Missing E&D: ', self._missing_ed)
print('Missing T: ', self._missing_t)
print('======================================================================')
def plot(self, measure='incidence_rate_difference', center=0, **errorbar_kwargs):
"""Plot the incidence rate differences or the incidence rates along with their corresponding confidence
intervals. This option is an alternative to summary(), which displays results in a table format.
Parameters
----------
measure : str, optional
Whether to display incidence rate differences or incidence rates. Default is to display the incidence rate
differences. Options are;
* 'incidence_rate_difference' : display incidence rate differences
* 'incidence_rate' : display incidence rates
center : float, optional
Sets a reference line. For the incidence rate difference, the reference line defaults to 0. For incidence
rates, no reference line is displayed.
errorbar_kwargs: add additional kwargs to be passed to the plotting function ``matplotlib.errorbar``. See
defaults here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
Returns
-------
matplotlib axes
"""
if measure == 'incidence_rate_difference':
ax = _plotter(estimate=self.results['IncRateDiff'], lcl=self.results['IRD_LCL'],
ucl=self.results['IRD_UCL'], labels=self.results.index,
center=center, **errorbar_kwargs)
ax.set_title('Incidence Rate Difference')
elif measure == 'incidence_rate':
ax = _plotter(estimate=self.results['IncRate'], lcl=self.results['IncRate_LCL'],
ucl=self.results['IncRate_UCL'], labels=self.results.index,
center=np.nan, **errorbar_kwargs)
ax.set_title('Incidence Rate')
ax.set_xlim([0, 1])
else:
raise ValueError('Must specify either "incidence_rate_difference" or "incidence_rate" for plots')
return ax
def _plotter(estimate, lcl, ucl, labels, center=0, **errorbar_kwargs):
"""
Plot functionality shared by all the measure classes. Internal helper behind the
other plotting methods.
The main function is matplotlib.errorbar, see defaults here:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.errorbar.html
"""
ypoints = np.arange(len(labels))
ax = plt.gca()
errorbar_kwargs.setdefault('fmt', 'o')
errorbar_kwargs.setdefault('color', 'k')
absolute_errors_from_estimate = np.abs(estimate.values - np.vstack((lcl, ucl)))
ax.errorbar(estimate, ypoints, xerr=absolute_errors_from_estimate, **errorbar_kwargs)
if not np.isnan(center):
ax.axvline(center, zorder=1, color='gray')
# set tick positions before labels so matplotlib keeps the custom labels
ax.set_yticks(ypoints)
ax.set_yticklabels(labels)
return ax
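# Example (internal) usage, mirroring the calls in the classes above:
#   ax = _plotter(estimate=self.results['RiskRatio'], lcl=self.results['RR_LCL'],
#                 ucl=self.results['RR_UCL'], labels=self.results.index, center=1)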
#########################################################################################################
# Testing measures
#########################################################################################################
class Sensitivity:
r"""Generates the sensitivity and (1-alpha)% confidence interval, comparing test results to disease status
from pandas dataframe
Sensitivity is calculated from
.. math::
Sensitivity = \frac{TP}{P}
Wald standard error is
.. math::
SE_{Wald} = \left(\frac{1}{TP} - \frac{1}{P}\right)^{\frac{1}{2}}
Note
----
Disease & Test must be coded as (1: yes, 0:no)
Parameters
--------------------
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the sensitivity in a data set
>>> from zepid import Sensitivity, load_sample_data
>>> df = load_sample_data(False)
>>> sens = Sensitivity()
>>> sens.fit(df, test='art', disease='dead') # Note this example is not great... ART is a treatment not test
>>> sens.summary()
"""
def __init__(self, alpha=0.05):
self.alpha = alpha
self.sensitivity = None
self.results = None
self._fit = False
self._a = None
self._b = None
def fit(self, df, test, disease):
"""Calculates the Sensitivity
Parameters
-----------------
df : DataFrame
Pandas dataframe containing variables of interest
test : string
Column name of test results to detect the outcome. Needs to be coded as binary (0,1), where 1 indicates a
positive test for the individual
disease : string
Column name of true outcomes status. Needs to be coded as binary (0,1), where 1 indicates the individual
has the outcome
"""
self._a = df.loc[(df[test] == 1) & (df[disease] == 1)].shape[0]
self._b = df.loc[(df[test] == 1) & (df[disease] == 0)].shape[0]
se, ls, us, sd = sensitivity(detected=self._a, cases=(self._a + self._b), alpha=self.alpha)
self.sensitivity = se
# Setting up results
rf = pd.DataFrame()
rf['Sensitivity'] = [se]
rf['SD(Se)'] = [sd]
rf['Se_LCL'] = [ls]
rf['Se_UCL'] = [us]
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
-----------------
decimal : integer, optional
Decimal places to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
print(tabulate([['T+', self._a, self._b]], headers=['', 'D+', 'D-'], tablefmt='grid'), '\n')
print('======================================================================')
print(' Sensitivity ')
print('======================================================================')
print(self.results[['Sensitivity', 'SD(Se)', 'Se_LCL', 'Se_UCL']].round(decimals=decimal))
print('======================================================================')
class Specificity:
r"""Generates the sensitivity and (1-alpha)% confidence interval, comparing test results to disease status
from pandas dataframe
Specificity is calculated from
.. math::
Sp = \frac{TN}{N}
Wald standard error is
.. math::
SE_{Wald} = \left(\frac{1}{TN} - \frac{1}{N}\right)^{\frac{1}{2}}
Note
----
Disease & Test must be coded as (1: yes, 0:no)
Parameters
-----------
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the specificity in a data set
>>> from zepid import Specificity, load_sample_data
>>> df = load_sample_data(False)
>>> spec = Specificity()
>>> spec.fit(df, test='art', disease='dead') # Note this example is not great... ART is a treatment not test
>>> spec.summary()
"""
def __init__(self, alpha=0.05):
self.alpha = alpha
self.specificity = None
self.results = None
self._fit = False
self._c = None
self._d = None
def fit(self, df, test, disease):
"""Calculates specificity
Parameters
-------------
df : DataFrame
Pandas dataframe containing variables of interest
test : string
Column name of test results to detect the outcome. Needs to be coded as binary (0,1), where 1 indicates a
positive test for the individual
disease : string
Column name of true outcomes status. Needs to be coded as binary (0,1), where 1 indicates the individual
has the outcome
"""
self._c = df.loc[(df[test] == 0) & (df[disease] == 1)].shape[0]
self._d = df.loc[(df[test] == 0) & (df[disease] == 0)].shape[0]
sp, ls, us, sd = specificity(detected=self._c, noncases=(self._c + self._d), alpha=self.alpha)
self.specificity = sp
# Setting up results
rf = pd.DataFrame()
rf['Specificity'] = [sp]
rf['SD(Sp)'] = [sd]
rf['Sp_LCL'] = [ls]
rf['Sp_UCL'] = [us]
self.results = rf
self._fit = True
def summary(self, decimal=3):
"""Prints the summary results
Parameters
-------------
decimal : integer, optional
Decimal places to display. Default is 3
"""
if self._fit is False:
raise ValueError('fit() function must be completed before results can be obtained')
print(tabulate([['T-', self._c, self._d]], headers=['', 'D+', 'D-'], tablefmt='grid'), '\n')
print('======================================================================')
print(' Specificity ')
print('======================================================================')
print(self.results[['Specificity', 'SD(Sp)', 'Sp_LCL', 'Sp_UCL']].round(decimals=decimal))
print('======================================================================')
class Diagnostics:
r"""Generates the Sensitivity, Specificity, and the corresponding (1-alpha)% confidence intervals, comparing test
results to disease status from pandas DataFrame
Sensitivity is calculated from
.. math::
Se = \frac{TP}{P}
Wald standard error is
.. math::
SE_{Wald} = \left(\frac{1}{TP} - \frac{1}{P}\right)^{\frac{1}{2}}
Specificity is calculated from
.. math::
Sp = \frac{TN}{N}
Wald standard error is
.. math::
SE_{Wald} = \left(\frac{1}{TN} - \frac{1}{N}\right)^{\frac{1}{2}}
Note
----
Disease & Test must be coded as (1: yes, 0:no)
Parameters
-------------
alpha : float, optional
Alpha value to calculate two-sided Wald confidence intervals. Default is 95% confidence interval
Examples
--------
Calculate the sensitivity and specificity in a data set
>>> from zepid import Diagnostics, load_sample_data
>>> df = load_sample_data(False)
>>> diag = Diagnostics()
>>> diag.fit(df, test='art', disease='dead') # Note this example is not great... ART is a treatment not test
>>> diag.summary()
"""
def __init__(self, alpha=0.05):
self.alpha = alpha
self.sensitivity = None
self.specificity = None
self.results = None
self._fit = False
self._a = None
self._b = None
self._c = None
self._d = None
def fit(self, df, test, disease):
"""Calculates sensitivity and specificity
Parameters
----------------
df : DataFrame
Pandas dataframe containing variables of interest
test : string
Column name of test results to detect the outcome. Needs to be coded as binary (0,1), where 1 indicates a
positive test for the individual
disease : string
Column name of true outcomes status. Needs to be coded as binary (0,1), where 1 indicates the individual
has the outcome
"""
self.sensitivity = Sensitivity(alpha=self.alpha)
self.sensitivity.fit(df=df, test=test, disease=disease)
self.specificity = Specificity(alpha=self.alpha)
self.specificity.fit(df=df, test=test, disease=disease)
def summary(self, decimal=3):
"""Prints the results
Parameters
-------------
decimal : integer, optional
Decimal points to display. Default is 3
"""
print(tabulate([['T+', self.sensitivity._a, self.sensitivity._b],
['T-', self.specificity._c, self.specificity._d]],
headers=['', 'D+', 'D-'], tablefmt='grid'), '\n')
print('======================================================================')
print(' Diagnostics ')
print('======================================================================')
print(self.sensitivity.results[['Sensitivity', 'SD(Se)', 'Se_LCL', 'Se_UCL']].round(decimals=decimal))
print(self.specificity.results[['Specificity', 'SD(Sp)', 'Sp_LCL', 'Sp_UCL']].round(decimals=decimal))
print('======================================================================')
#########################################################################################################
# Interaction contrasts
#########################################################################################################
def interaction_contrast(df, exposure, outcome, modifier, adjust=None, decimal=3, print_results=True):
r"""Calculate the Interaction Contrast (IC) using a pandas dataframe and statsmodels to fit a linear
binomial regression. Can ONLY be used for a 0,1 coded exposure and modifier (exposure = {0,1}, modifier = {0,1},
outcome = {0,1}). Can handle adjustment for other confounders in the regression model. Prints the fit
of the linear binomial regression, the IC, and the corresponding IC 95% confidence interval.
Interaction Contrast is defined as the following
.. math::
IC = RD_{11} - RD_{10} - RD_{01}
Note
----
statsmodels may produce a domain error in some versions.
Parameters
----------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable. Must be coded as (0,1) where 1 is exposure
outcome : string
Column name of outcome variable. Must be coded as (0,1) where 1 is outcome of interest
modifier : string
Column name of modifier variable. Must be coded as (0,1) where 1 is modifier
adjust : string, optional
String of other variables to adjust for, in correct statsmodels format. Default is None. Variables can *NOT* be
named {E1M0,E0M1,E1M1} since this function creates variables with those names; answers will be incorrect
otherwise. An example of accepted input is 'C1 + C2 + C3 + Z'
decimal : integer, optional
Decimal places to display in result. Default is 3
print_results : bool, optional
Whether to print results from interaction contrast assessment
Examples
--------
Setting up environment
>>> from zepid import interaction_contrast, load_sample_data
>>> df = load_sample_data(False)
Calculating interaction contrast for ART and gender
>>> interaction_contrast(df, exposure='art', outcome='dead', modifier='male')
"""
if adjust is None:
eq = outcome + ' ~ ' + exposure + ' + ' + modifier + ' + ' + exposure + ':' + modifier
else:
eq = outcome + ' ~ ' + exposure + ' + ' + modifier + ' + ' + exposure + ':' + modifier + ' + ' + adjust
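# Identity-link binomial GLM: coefficients are risk differences, so the
# exposure:modifier coefficient is the interaction contrast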
f = sm.families.family.Binomial(sm.families.links.identity())
model = smf.glm(eq, df, family=f).fit()
ic = model.params[exposure + ':' + modifier]
lcl = model.conf_int().loc[exposure + ':' + modifier][0]
ucl = model.conf_int().loc[exposure + ':' + modifier][1]
if print_results:
print(model.summary())
print('\n======================================================================')
print(' Interaction Contrast ')
print('======================================================================')
print('IC:\t\t' + str(round(ic, decimal)))
print('95% CI:\t\t(' + str(round(lcl, decimal)) + ', ' + str(round(ucl, decimal)) + ')')
print('======================================================================')
return ic, lcl, ucl
def interaction_contrast_ratio(df, exposure, outcome, modifier, adjust=None, regression='logit', ci='delta',
b_sample=200, alpha=0.05, decimal=5, print_results=True):
r"""Calculate the Interaction Contrast Ratio (ICR) using a pandas dataframe, and conducts either log binomial
or logistic regression through statsmodels. Can ONLY be used for a 0,1 coded exposure and modifier (exposure =
{0,1}, modifier = {0,1}, outcome = {0,1}). Can handle missing data and adjustment for other confounders in the
regression model. Prints the fit of the binomial regression, the ICR, and the corresponding ICR confidence interval
Interaction contrast ratio is defined as
.. math::
ICR = RR_{11} - RR_{10} - RR_{01} + 1
Confidence intervals can be generated either through a bootstrap procedure or using the delta method
Parameters
---------------
df : DataFrame
Pandas dataframe containing variables of interest
exposure : string
Column name of exposure variable. Must be coded as (0,1) where 1 is exposure
outcome : string
Column name of outcome variable. Must be coded as (0,1) where 1 is outcome of interest
modifier : string
Column name of modifier variable. Must be coded as (0,1) where 1 is modifier
adjust : string, optional
String of other variables to adjust for, in correct statsmodels format. Default is None. Variables can *NOT* be
named {E1M0,E0M1,E1M1} since this function creates variables with those names; answers will be incorrect
otherwise. An example of accepted input is 'C1 + C2 + C3 + Z'
regression : string, optional
Type of regression model to fit. Default is 'logit', which fits the logistic model. Options include:
* 'log' Log-binomial model. Estimates the Risk Ratio
* 'logit' Logistic model. Estimates Odds Ratio. Only valid when odds ratio approximates the risk ratio
ci : string, optional
Type of confidence interval to return. Default is the delta method. Options include:
* 'delta': Delta method as described by Hosmer and Lemeshow (1992)
* 'bootstrap': bootstrap method (Assmann et al. 1996). The delta method is more time efficient than bootstrap
b_sample : integer, optional
Number of times to resample to generate bootstrap confidence intervals. Only used if bootstrap confidence
intervals are requested. Default is 200
alpha : float, optional
Alpha level for confidence interval. Default is 0.05, which returns 95% confidence intervals
decimal : integer, optional
Decimal places to display in result. Default is 5
print_results : bool, optional
Whether to print results from interaction contrast assessment
Note
----
statsmodels may produce a domain error for log binomial models in some versions
Examples
--------
Setting up environment
>>> from zepid import interaction_contrast_ratio, load_sample_data
>>> df = load_sample_data(False)
Calculating interaction contrast ratio for ART and gender
>>> interaction_contrast_ratio(df, exposure='art', outcome='dead', modifier='male')
Calculating interaction contrast ratio for ART and gender, confidence intervals from bootstrap
>>> interaction_contrast_ratio(df, exposure='art', outcome='dead', modifier='male', ci='bootstrap')
"""
if regression == 'logit':
f = sm.families.family.Binomial()
warnings.warn('Using the Odds Ratio to calculate the ICR is only valid when the OR approximates the RR',
UserWarning)
elif regression == 'log':
f = sm.families.family.Binomial(sm.families.links.log())
else:
raise ValueError("Either 'logit' or 'log' must be specified for the regression parameter")
df = df.copy()
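# Recode to disjoint indicator terms: '_'+exposure is exposure with the modifier
# absent, '_'+modifier is the modifier with exposure absent; the product term
# then captures the joint exposure-modifier category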
df['_'+exposure] = np.where(df[modifier] == 0, df[exposure], 0)
df['_'+modifier] = np.where(df[exposure] == 0, df[modifier], 0)
if adjust is None:
eq = outcome + ' ~ ' + '_'+exposure + ' + ' + '_'+modifier + ' + ' + exposure + ':' + modifier
else:
eq = outcome + ' ~ ' + '_'+exposure + ' + ' + '_'+modifier + ' + ' + exposure + ':' + modifier + ' + ' + adjust
model = smf.glm(eq, df, family=f).fit()
em10 = np.exp(model.params['_'+exposure])
em01 = np.exp(model.params['_'+modifier])
em11 = np.exp(model.params[exposure + ':' + modifier])
em_expect = em10 + em01 - 1
icr = em11 - em_expect
zalpha = norm.ppf((1 - alpha / 2), loc=0, scale=1)
if ci == 'delta':
cov_matrix = model.cov_params()
vb10 = cov_matrix.loc['_'+exposure]['_'+exposure]
vb01 = cov_matrix.loc['_'+modifier]['_'+modifier]
vb11 = cov_matrix.loc[exposure + ':' + modifier][exposure + ':' + modifier]
cvb10_01 = cov_matrix.loc['_'+exposure]['_'+modifier]
cvb10_11 = cov_matrix.loc['_'+exposure][exposure + ':' + modifier]
cvb01_11 = cov_matrix.loc['_'+modifier][exposure + ':' + modifier]
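# Delta-method variance of the ICR: gradient of the ICR with respect to the
# three regression coefficients, propagated through their covariance matrix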
var_icr = (((em10 ** 2) * vb10) + ((em01 ** 2) * vb01) + ((em11 ** 2) * vb11) +
(em10 * em01 * 2 * cvb10_01) + (-1 * em10 * em11 * 2 * cvb10_11) +
(-1 * em01 * em11 * 2 * cvb01_11))
icr_lcl = icr - zalpha * np.sqrt(var_icr)
icr_ucl = icr + zalpha * np.sqrt(var_icr)
elif ci == 'bootstrap':
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
bse_icr = []
ul = 1 - alpha / 2
ll = 0 + alpha / 2
for i in range(b_sample):
dfs = df.sample(n=df.shape[0], replace=True)
try:
bmodel = smf.glm(eq, dfs, family=f).fit()
em_bexpect = np.exp(bmodel.params['_'+exposure]) + np.exp(bmodel.params['_'+modifier]) - 1
bicr = np.exp(bmodel.params[exposure + ':' + modifier]) - em_bexpect
sigma = bicr - icr
bse_icr.append(sigma)
except Exception:
bse_icr.append(np.nan)
se = np.std(bse_icr)
icr_lcl = icr - zalpha * se
icr_ucl = icr + zalpha * se
else:
raise ValueError('Please specify a supported confidence interval type')
if print_results:
print(model.summary())
print('\n======================================================================')
print(' Interaction Contrast Ratio ')
print('======================================================================')
if regression == 'logit':
print('ICR based on Odds Ratio\t\tAlpha = ' + str(alpha))
print('Note: Using the Odds Ratio to calculate the ICR is only valid when '
'the OR approximates the RR')
elif regression == 'log':
print('ICR based on Risk Ratio\t\tAlpha = ' + str(alpha))
print('----------------------------------------------------------------------')
print('ICR:\t\t' + str(round(icr, decimal)))
print('CI:\t\t(' + str(round(icr_lcl, decimal)) + ', ' + str(round(icr_ucl, decimal)) + ')')
print('======================================================================')
return icr, icr_lcl, icr_ucl
#########################################################################################################
# Other
#########################################################################################################
def create_spline_transform(array, n_knots=3, knots=None, term=1, restricted=False):
"""Creates spline dummy variables based on either user specified knot locations or automatically
determines knot locations based on percentiles. Options are available to set the number of knots,
location of knots (value), term (linear, quadratic, etc.), and restricted/unrestricted.
Parameters
--------------
array : array-like
Array of values from which percentile-based knot locations are determined when `knots` is None
n_knots : integer, optional
Number of knots requested. Options for knots include any positive integer if the location of knots are
specified. If knot locations are not specified, n_knots must be an integer between 1 and 7. Default is 3 knots
knots : list, optional
Location of specified knots in a list. To specify the location of knots, put desired numbers for knots into a
list. Be sure that the length of the list is the same as the specified number of knots. Default is None, so
that the function will automatically determine knot locations without user specification
term : integer, float, optional
High order term for the spline terms. To calculate a quadratic spline change to 2, cubic spline
change to 3, etc. Default is 1, i.e. a linear spline
restricted : bool, optional
Whether to return a restricted spline. Note that the restricted spline returns one less column than the number
of knots. An unrestricted spline returns the same number of columns as the number of knots. Default is False,
providing an unrestricted spline
Returns
---------
function : a lambda function that accepts a numpy array or list
list : a list of the knot locations
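Examples
--------
A minimal usage sketch (assuming a numeric 1-D array ``x``):
>>> import numpy as np
>>> x = np.random.uniform(0, 10, size=200)
>>> spline_transform, knot_values = create_spline_transform(x, n_knots=3, restricted=True)
>>> spline_terms = spline_transform(x)
"""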
"""
if knots is None:
if n_knots == 1:
knots = [50]
elif n_knots == 2:
knots = [100 / 3, 200 / 3]
elif n_knots == 3:
knots = [5, 50, 95]
elif n_knots == 4:
knots = [5, 35, 65, 95]
elif n_knots == 5:
knots = [5, 27.5, 50, 72.5, 95]
elif n_knots == 6:
knots = [5, 23, 41, 59, 77, 95]
elif n_knots == 7:
knots = [2.5, 1100 / 60, 2600 / 75, 50, 7900 / 120, 4900 / 60, 97.5]
else:
raise ValueError(
'When the knot locations are not pre-specified, the number of specified knots must be'
' an integer between 1 and 7')
pts = np.percentile(array, q=knots).tolist()
else:
if n_knots != len(knots):
raise ValueError('The number of knots and the number of specified knots must match')
else:
pass
pts = knots
if sorted(pts) != pts:
raise ValueError('Knots must be in ascending order')
def _spline(x):
x = np.asarray(x)
V = np.empty((x.shape[0], len(pts)))
for i in range(len(pts)):
V[:, i] = np.where(x > pts[i], (x - pts[i]) ** term, 0)
V[:, i] = np.where(pd.isnull(x), np.nan, V[:, i])
if restricted is False:
return V
else:
for i in range(len(pts) - 1):
V[:, i] = np.where(x > pts[i], V[:, i] - V[:, -1], 0)
return V[:, :-1]
return _spline, pts
# Copyright 2015 <NAME> Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import os.path
import numpy
# Prepare readers for compressed files
readers = {}
try:
import gzip
readers['.gz'] = gzip.GzipFile
except ImportError:
pass
try:
import bz2
readers['.bz2'] = bz2.BZ2File
except ImportError:
pass
def smart_open(filename, mode = 'rb', *args, **kwargs):
'''
Opens a file "smartly":
* If the filename has a ".gz" or ".bz2" extension, compression is handled
automatically;
* If the file is to be read and does not exist, corresponding files with
a ".gz" or ".bz2" extension will be attempted.
(The Python packages "gzip" and "bz2" must be installed to deal with the
corresponding extensions)
'''
if 'r' in mode and not os.path.exists(filename):
for ext in readers:
if os.path.exists(filename + ext):
filename += ext
break
extension = os.path.splitext(filename)[1]
return readers.get(extension, open)(filename, mode, *args, **kwargs)
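# Example usage (hypothetical file names): reading 'feats.ark' falls back to
# 'feats.ark.gz' or 'feats.ark.bz2' if only a compressed copy exists:
#   with smart_open('feats.ark', 'rb') as f:
#       data = f.read()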
def make_context(feature, left, right):
'''
Takes a 2-D numpy feature array, and pads each frame with a specified
number of frames on either side.
'''
feature = [feature]
for i in range(left):
feature.append(numpy.vstack((feature[-1][0], feature[-1][:-1])))
feature.reverse()
for i in range(right):
feature.append(numpy.vstack((feature[-1][1:], feature[-1][-1])))
return numpy.hstack(feature)
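# Example: with one frame of context on each side, a (3, 2) feature matrix
# becomes a (3, 6) matrix, with edge frames duplicated:
#   make_context(numpy.array([[1, 2], [3, 4], [5, 6]]), 1, 1)
#   -> [[1 2 1 2 3 4]
#       [1 2 3 4 5 6]
#       [3 4 5 6 5 6]]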
def preprocess_feature_and_label(feature, label, opts):
'''
Apply the options 'context', 'ignore-label', 'map-label' to the feature
matrix and label vector.
'''
feature = make_context(feature, opts['lcxt'], opts['rcxt'])
if label is not None:
if 'ignore-label' in opts:
ignore = opts['ignore-label']
mask = numpy.array([x not in ignore for x in label])
feature = feature[mask]
label = label[mask]
if 'map-label' in opts:
label_map = opts['map-label']
label = numpy.array([label_map.get(x, x) for x in label])
return feature, label
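# Example options dict (hypothetical values): one frame of context on each
# side, label 0 dropped, and label 2 remapped to 1:
#   opts = {'lcxt': 1, 'rcxt': 1, 'ignore-label': {0}, 'map-label': {2: 1}}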
def shuffle_feature_and_label(feature, label):
'''
Randomly shuffles features and labels in the *same* order.
'''
seed = 18877
numpy.random.seed(seed)
numpy.random.shuffle(feature)
numpy.random.seed(seed)
numpy.random.shuffle(label)
def shuffle_across_partitions(feature_list, label_list):
'''
Randomly shuffles features and labels in the same order across partitions.
'''
total = sum(len(x) for x in feature_list)
n = len(feature_list[0]) # Partition size
buffer = numpy.empty_like(feature_list[0][0])
seed = 18877
numpy.random.seed(seed)
# Fisher-Yates shuffle over the global index space, swapping across partitions
for i in range(total - 1, 0, -1):
j = numpy.random.randint(i + 1)
buffer[:] = feature_list[i // n][i % n]
feature_list[i // n][i % n] = feature_list[j // n][j % n]
feature_list[j // n][j % n] = buffer
label_list[i // n][i % n], label_list[j // n][j % n] = label_list[j // n][j % n], label_list[i // n][i % n]
import numpy as np
import pytest
from chainer_chemistry.dataset.preprocessors import wle as WLE # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
@pytest.fixture
def small_datasets():
N_1 = 3
N_2 = 5
# one-hot atom labels: 0 to N-1
atom_array_1 = np.arange(N_1)
atom_array_2 = np.arange(N_2)
# adj-array, manually
# all connected; expanded labels are a permutation of 0, 1, 2
adj_array_1 = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]).astype(np.int32)
# node 0 --> 0-1.2
# node 1 --> 1-0.2
# node 2 --> 2-0.1
adj_array_2 = np.array([[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[0, 0, 1, 1, 0],
[0, 0, 1, 1, 0],
[1, 1, 0, 0, 1]]).astype(np.float32)
# node 0 --> 0-1.4
# node 1 --> 1-0.4
# node 2 --> 2-3
# node 3 --> 3-2
# node 4 --> 4-0.1
# supervised labels, dummy
teach_signal_1 = np.array(1)
# -*- coding:utf-8 -*- #
'''
Landsat time-series data analysis subwindow: mainly for analyzing and processing
time-series data. Specifically: 1) retrieval of Landsat time-series curves
'''
from PyQt5 import QtCore, QtWidgets
from scipy.optimize import leastsq
import numpy as np
import data_manager as dm
import argrithms as ag
import scipy.io
import matplotlib.pyplot as plt
import matplotlib
import gdal
matplotlib.use("Qt5Agg") # 声明使用QT5
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
class landsatPIFsUI(QtWidgets.QWidget):
'''
Purpose: select pseudo-invariant features (PIFs); a standard-deviation-based PIF selection is used here
Input: *.Mat (clipped data source)
Output: points
'''
def __init__(self):
super().__init__()
self.doys = ['20171023', '20171108', '20171124', '20171210', '20171226', '20180111', '20180212',
'20180316', '20180417', '20180503', '20180519', '20180604', '20180620', '20180823',
'20180908', '20180924', '20181010', '20181026', '20181213', '20181229', '20190114']  # time points
#
self.initUI()
#
# key variables
self.clipValues = []  # band reflectance of the clipped region
self.clipStds = []  # standard deviations of the clipped region
self.clipSlopes = []  # slopes of the clipped region
self.pifValues = []  # time-series curves of all PIF points
self.pifDetermine = []  # PIF flags, all 0 or 1
def initUI(self):
#
# initialize the window
self.setWindowTitle('PIFs Select')
self.setWindowFlags(QtCore.Qt.Dialog)
self.setWindowModality(QtCore.Qt.ApplicationModal)
#
# set up widgets
self.groupbox_FeatureCal = QtWidgets.QGroupBox('Feature Calculation', self)
self.button_inputMatDir = QtWidgets.QPushButton('Input_MatDir', self)
self.lineEdit_inputMatDir = QtWidgets.QLineEdit(self)
self.button_stds = QtWidgets.QPushButton('stds', self)
self.button_slopes = QtWidgets.QPushButton('slopes', self)
#
self.groupbox_pifsExtract = QtWidgets.QGroupBox('PIFs Extraction', self)
self.button_pifs = QtWidgets.QPushButton('PIFs Extract', self)
self.botton_pifsMethods = QtWidgets.QPushButton('PIFs-Methods', self)
self.cmobox_pifsMethod = QtWidgets.QComboBox(self)
self.button_lower = QtWidgets.QPushButton('Lower', self)
self.button_upper = QtWidgets.QPushButton('Upper', self)
self.lineEdit_lower = QtWidgets.QLineEdit(self)
self.lineEdit_upper = QtWidgets.QLineEdit(self)
self.button_export = QtWidgets.QPushButton('Export', self)
self.button_saveMatDir = QtWidgets.QPushButton('Save_MatDir', self)
self.lineEdit_saveMatDir = QtWidgets.QLineEdit(self)
self.view = myView(self)
self.scene = QtWidgets.QGraphicsScene()
#
self.cmobox_mode = QtWidgets.QComboBox(self)
self.button_showImg = QtWidgets.QPushButton('Show', self)
#
self.groupbox_pifsOtherBands = QtWidgets.QGroupBox('PIFs-Other Bands', self)
self.button_inputOtherBandMat = QtWidgets.QPushButton('Input-OB', self)
self.lineEdit_inputOtherBandMat = QtWidgets.QLineEdit(self)
self.button_pifsImport = QtWidgets.QPushButton('PIFs-Import', self)
self.button_exportOtherBand = QtWidgets.QPushButton('Export-Values', self)
self.lineEdit_exportOtherBand = QtWidgets.QLineEdit(self)
# Layout
grid = QtWidgets.QGridLayout(self)
grid_FeatureCal = QtWidgets.QGridLayout(self.groupbox_FeatureCal)
grid_pifsExtract = QtWidgets.QGridLayout(self.groupbox_pifsExtract)
grid_pifsOtherBands = QtWidgets.QGridLayout(self.groupbox_pifsOtherBands)
#
grid.addWidget(self.groupbox_FeatureCal, 0, 0, 2, 4)
grid.addWidget(self.groupbox_pifsExtract, 2, 0, 6, 4)
grid.addWidget(self.view, 0, 4, 10, 8)
grid.addWidget(self.groupbox_pifsOtherBands, 8, 0, 2, 4)
self.view.setFixedWidth(500)
#
grid_FeatureCal.addWidget(self.button_inputMatDir, 0, 0, 1, 1)
grid_FeatureCal.addWidget(self.lineEdit_inputMatDir, 0, 1, 1, 3)
grid_FeatureCal.addWidget(self.button_stds, 1, 2, 1, 1)
grid_FeatureCal.addWidget(self.button_slopes, 1, 3, 1, 1)
#
grid_pifsExtract.addWidget(self.botton_pifsMethods, 0, 0, 1, 1)
grid_pifsExtract.addWidget(self.cmobox_pifsMethod, 0, 1, 1, 3)
grid_pifsExtract.addWidget(self.button_lower, 1, 0, 1, 1)
grid_pifsExtract.addWidget(self.lineEdit_lower, 1, 1, 1, 3)
grid_pifsExtract.addWidget(self.button_upper, 2, 0, 1, 1)
grid_pifsExtract.addWidget(self.lineEdit_upper, 2, 1, 1, 3)
grid_pifsExtract.addWidget(self.button_pifs, 3, 2, 1, 2)
grid_pifsExtract.addWidget(self.button_saveMatDir, 4, 0, 1, 1)
grid_pifsExtract.addWidget(self.lineEdit_saveMatDir, 4, 1, 1, 3)
grid_pifsExtract.addWidget(self.cmobox_mode, 5, 0, 1, 1)
grid_pifsExtract.addWidget(self.button_showImg, 5, 1, 1, 1)
grid_pifsExtract.addWidget(self.button_export, 5, 2, 1, 2)
#
grid_pifsOtherBands.addWidget(self.button_inputOtherBandMat, 0, 0, 1, 1)
grid_pifsOtherBands.addWidget(self.lineEdit_inputOtherBandMat, 0, 1, 1, 3)
grid_pifsOtherBands.addWidget(self.button_pifsImport, 1, 0, 1, 1)
grid_pifsOtherBands.addWidget(self.button_exportOtherBand, 1, 1, 1, 1)
grid_pifsOtherBands.addWidget(self.lineEdit_exportOtherBand, 1, 2, 1, 2)
#
# initialization
self.cmobox_pifsMethod.addItems(['std', 'slope'])
self.cmobox_mode.addItems(['img-stds', 'img-pifsDerterMined', 'img-slopes'])
self.botton_pifsMethods.setDisabled(True)
self.button_lower.setDisabled(True)
self.button_upper.setDisabled(True)
self.button_pifs.setDisabled(True)
self.button_exportOtherBand.setDisabled(True)
self.button_pifs.setStyleSheet("background-color: blue")
self.button_export.setStyleSheet("background-color: blue")
self.button_pifsImport.setStyleSheet("background-color: blue")
# slots and handlers
self.button_inputMatDir.clicked.connect(self.slot_buttonInputMatDir)  # choose the input .mat path
self.button_slopes.clicked.connect(self.slot_buttonSlope)  # compare PIF selection methods
self.button_stds.clicked.connect(self.slot_buttonStd)  # compute the standard deviation of the study area
self.button_pifs.clicked.connect(self.slot_buttonPifs)  # compute PIFs of the study area, 0 or 1
self.button_showImg.clicked.connect(self.slot_buttonShowImg)  # show the image
self.button_export.clicked.connect(self.slot_buttonExport)  # export the data behind the image
self.button_saveMatDir.clicked.connect(self.slot_buttonSaveMatDir)  # choose the .mat save path
#
self.button_inputOtherBandMat.clicked.connect(self.slot_buttonInputOtherBandMat)  # load reflectance data of other bands
self.button_pifsImport.clicked.connect(self.slot_buttonPIFsImport)  # import PIFs
self.button_exportOtherBand.clicked.connect(self.slot_buttonExportOtherBandsValues)  # export band values at the PIFs
def slot_buttonInputMatDir(self):
#
# choose the file path
open_filename = QtWidgets.QFileDialog.getOpenFileName(filter='*.mat')[0]
self.lineEdit_inputMatDir.setText(open_filename)
if 'S1000' in self.lineEdit_inputMatDir.text():
self.clipValues = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['ref_Values'] # eg[21,1001,1001]
print(np.shape(np.array(self.clipValues)))
self.button_pifs.setDisabled(False)
def slot_buttonSlope(self):
#
self.cmobox_mode.setCurrentIndex(2)
self.cmobox_pifsMethod.setCurrentIndex(1)
if 'slope' in self.lineEdit_inputMatDir.text():
self.clipSlopes = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['slopes']
else:
self.clipValues = scipy.io.loadmat(self.lineEdit_inputMatDir.text())['ref_Values'] # eg[21,1001,1001]
print(np.shape(np.array(self.clipValues)))
#
# after sorting, estimate slopes with least squares
arrays_clip = np.array(self.clipValues).astype(float)
for i in range(np.shape(arrays_clip)
import os
import shutil
import json
from typing import Tuple
from matplotlib.pyplot import isinteractive
from numpy.lib.arraysetops import isin
import torch
import numpy as np
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
from networkx import to_numpy_array as nx_to_numpy_array
import dgl as dgl
import torch.backends.cudnn as cudnn
# create directory if it does not exist
def check_dir(dir_path):
dir_path = dir_path.replace('//','/')
os.makedirs(dir_path, exist_ok=True)
def check_file(file_path):
file_path = file_path.replace('//','/')
dir_path = os.path.dirname(file_path)
check_dir(dir_path)
if not os.path.exists(file_path):
with open(file_path,'w') as f:
pass
def setup_env(cpu):
# Randomness is already controlled by Sacred
# See https://sacred.readthedocs.io/en/stable/randomness.html
if not cpu:
cudnn.benchmark = True
def save_checkpoint(state, is_best, log_dir, filename='checkpoint.pth.tar'):
#check_dir(log_dir)
filename = os.path.join(log_dir, filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(log_dir, 'model_best.pth.tar'))
#shutil.copyfile(filename, model_path)
print(f"Best Model yet : saving at {log_dir+'/model_best.pth.tar'}")
fn = os.path.join(log_dir, 'checkpoint_epoch{}.pth.tar')
torch.save(state, fn.format(state['epoch']))
if (state['epoch'] - 1 ) % 5 != 0:
#remove intermediate saved models, e.g. non-modulo 5 ones
if os.path.exists(fn.format(state['epoch'] - 1 )):
os.remove(fn.format(state['epoch'] - 1 ))
state['exp_logger'].to_json(log_dir=log_dir,filename='logger.json')
# move in utils
def load_model(model, device, model_path):
""" Load model. Note that the model_path argument is captured """
if os.path.exists(model_path):
print("Reading model from ", model_path)
checkpoint = torch.load(model_path, map_location=torch.device(device))
model.load_state_dict(checkpoint['state_dict'])
return model
else:
raise RuntimeError('Model does not exist!')
def save_to_json(jsonkey, loss, relevant_metric_dict, filename):
if os.path.exists(filename):
with open(filename, "r") as jsonFile:
data = json.load(jsonFile)
else:
data = {}
data[jsonkey] = {'loss':loss}
for dkey, value in relevant_metric_dict.items():
data[jsonkey][dkey] = value
with open(filename, 'w') as jsonFile:
json.dump(data, jsonFile)
# from https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
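# Usage sketch: pass the encoder class to json.dump/json.dumps, e.g.
#   json.dumps({'score': np.float32(0.5), 'ids': np.arange(3)}, cls=NpEncoder)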
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def get_device(t):
if t.is_cuda:
return t.get_device()
return 'cpu'
#Matrix operation
def symmetrize_matrix(A):
"""
Symmetrizes a matrix :
If shape is (a,b,c) will symmetrize by considering a is batch size
"""
Af = A.triu(0) + A.triu(1).transpose(-2,-1)
return Af
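# Usage sketch: for A of shape (bs, n, n), the strict upper triangle is
# mirrored into the lower triangle while the diagonal is kept from A, e.g.
#   A_sym = symmetrize_matrix(torch.rand(2, 4, 4))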
def list_to_tensor(liste) -> torch.Tensor:
"""Transforms a list of same shaped tensors"""
if isinstance(liste,torch.Tensor):
return liste
bs = len(liste)
shape = liste[0].shape
final_shape = (bs,*shape)
tensor_eq = torch.empty(final_shape)
for k in range(bs):
tensor_eq[k] = liste[k]
return tensor_eq
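# Note: for a list of torch tensors this is equivalent to torch.stack(liste, dim=0).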
#Graph operations
def edge_features_to_dense_tensor(graph, features, device='cpu'):
N = graph.number_of_nodes()
resqueeze = False
if len(features.shape)==1:
features = features.unsqueeze(-1)  # unsqueeze is not in-place; reassign
resqueeze = True
n_feats = features.shape[1]
t = torch.zeros((N,N,n_feats)).to(device)
#adj = torch.tensor(nx_to_numpy_array(graph.to_networkx())).to(device)#edges = np.array(graph.edges().cpu()).T #Transpose for the right shape (2,n_edges)
adj = graph.adj(ctx=device).to_dense()
ix,iy = torch.where(adj==1)
t[ix,iy] = features
if resqueeze:
t = t.squeeze(-1)  # squeeze is not in-place; reassign before returning
return t
def edge_features_to_dense_sym_tensor(graph,features,device='cpu'):
t = edge_features_to_dense_tensor(graph,features,device)
if torch.all(t.transpose(0,1)+t==2*t): #Matrix already symmetric
return t
N = graph.number_of_nodes()
tril = torch.tril(torch.ones((N,N)),-1)
tril = tril.unsqueeze(-1).to(device) #For the multiplication, we need to add the dimension
if torch.all(t*tril==0): #Only zeros in the lower triangle features
return t + t.transpose(0,1) * tril #Here we remove the diagonal with '* tril'
tbool = (t!=0)
tbool = tbool.sum(-1)!=0 #Here we have True where the feature vectors are not 0
ix,iy = torch.where(tbool!=0)
for i,j in zip(ix,iy):
if i==j or torch.all(t[j,i]==t[i,j]):
continue
elif torch.all(t[j,i]==0):
t[j,i] = t[i,j]
else:
raise AssertionError(f"Feature values are asymmetric, should not have used the symmetric function.")
return t
def edge_features_to_dense_features(graph, features, device='cpu'):
t = edge_features_to_dense_tensor(graph, features, device)
if len(features.shape)==1:
return t.flatten()
n_features = features.shape[1]
N = graph.number_of_nodes()
t_features = t.reshape((N**2,n_features))
return t_features
def edge_features_to_dense_sym_features(graph, features, device='cpu'):
t = edge_features_to_dense_sym_tensor(graph, features, device)
if len(features.shape)==1:
return t.flatten()
n_features = features.shape[1]
N = graph.number_of_nodes()
t_features = t.reshape((N**2,n_features))
return t_features
def edge_tensor_to_features(graph: dgl.DGLGraph, features: torch.Tensor, device='cpu'):
n_edges = graph.number_of_edges()
resqueeze = False
if len(features.shape)==3:
resqueeze=True
features = features.unsqueeze(-1)
bs,N,_,n_features = features.shape
ix,iy = graph.edges()
bsx,bsy = ix//N,iy//N
Nx,Ny = ix%N,iy%N
assert torch.all(bsx==bsy), "Edges between graphs, should not be allowed !" #Sanity check
final_features = features[(bsx,Nx,Ny)] #Here, shape will be (n_edges,n_features)
if resqueeze:
final_features = final_features.squeeze(-1)
return final_features
def temp_sym(t):
if torch.all(t.transpose(0,1)+t==2*t):
return t
elif torch.all(torch.tril(t,-1)==0):
return t + torch.triu(t,1).transpose(0,1)
else:
ix,iy = torch.where(t!=0)
for i,j in zip(ix,iy):
if t[j,i]==0:
t[j,i] = t[i,j]
elif t[j,i]==t[i,j]:
continue
else:
raise AssertionError(f"Feature values are asymmetric, should not have used the symmetric function.")
return t
#QAP
def perm_matrix(row,preds):
n = len(row)
permutation_matrix = np.zeros((n, n))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 11:20:30 2020
@author: yuhanyao
"""
##### radioactivity: Arnett model
import os
import sys
sys.path.append("/scratch/yyao/AT2019dge/playground/")
sys.path.append("/Users/yuhanyao/Documents/GitHub/AT2019dge/playground/")
import time
import numpy as np
import scipy.integrate as integrate
from helper import phys
from helper.mcmcfit import planck_lambda
from multiprocessing import Pool
import emcee
import scipy.optimize as op
import matplotlib.pyplot as plt
import corner
fs = 14
def get_int_A(x, y, s):
r = integrate.quad(lambda z: 2*z*np.exp(-2*z*y+z**2), 0, x)
int_A = r[0]
return int_A
def get_int_B(x, y, s):
r = integrate.quad(lambda z: 2*z*np.exp(-2*z*y+2*z*s+z**2), 0, x)
int_B = r[0]
return int_B
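# A and B are the two integrals of the Arnett model (Valenti et al. 2008):
#   A(x, y)    = int_0^x 2 z exp(-2 z y + z^2) dz
#   B(x, y, s) = int_0^x 2 z exp(-2 z y + 2 z s + z^2) dz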
def model_arnett_Ltph(ts_, taum_ = 3, Mni_ = 0.05):
'''
Calculate the flux of a radioactivity powered SN at photospheric phase
ts is in the unit of day
Mni_ is in the unit of Msun
The euqation is from
Valenti 2008 MNRAS 383 1485V, Appendix A
'''
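# The photospheric-phase luminosity computed below reads (Valenti et al. 2008, Appendix A):
#   L(t) = M_Ni * exp(-x^2) * [ (eps_Ni - eps_Co) * A(x, y) + eps_Co * B(x, y, s) ]
# with x = t / tau_m, y = tau_m / (2 tau_Ni),
#      s = tau_m * (tau_Co - tau_Ni) / (2 tau_Co tau_Ni).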
ts = ts_ * 24*3600 # in seconds
Mni = Mni_ * phys.sm
tau_m = taum_ * 24 * 3600.
epsilon_ni = 3.9e+10 # erg / s / g
epsilon_co = 6.78e+9 # erg / s / g
tau_ni = 8.8 * 24 * 3600 # s
tau_co = 111.3 * 24 * 3600 # s
Ls = np.zeros(len(ts))
for i in range(len(ts)):
t = ts[i]
x = t / tau_m
y = tau_m / (2 * tau_ni)
s = tau_m * (tau_co - tau_ni) / (2 * tau_co * tau_ni)
int_A = get_int_A(x, y, s)
int_B = get_int_B(x, y, s)
L = Mni * np.exp(-x**2) * ( (epsilon_ni - epsilon_co) * int_A + epsilon_co * int_B )
Ls[i] = L
# plt.loglog(ts/24/3600, Ls)
return Ls
def model_arnett_modified(ts_, taum_ = 3, Mni_ = 0.05, t0_ = 30, texp = 0):
'''
Calculate the flux of a radioactivity powered SN at photospheric phase
ts is in the unit of day
Mni_ is in the unit of Msun
The equation is from
Valenti 2008 MNRAS 383 1485V, Appendix A
'''
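# Same Arnett luminosity as model_arnett_Ltph, but damped at t > 0 by a
# gamma-ray trapping factor (1 - exp(-(t0/t)^2)); t0_ is the characteristic
# gamma-ray escape timescale in days.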
ts_ = ts_ - texp
ts = ts_ * 24*3600 # in seconds
Mni = Mni_ * phys.sm
tau_m = taum_ * 24 * 3600.
t0 = t0_ * 24 * 3600.
epsilon_ni = 3.9e+10 # erg / s / g
epsilon_co = 6.78e+9 # erg / s / g
tau_ni = 8.8 * 24 * 3600 # s
tau_co = 111.3 * 24 * 3600 # s
Ls = np.zeros(len(ts))
for i in range(len(ts)):
t = ts[i]
if t<=0:
Ls[i] = 0
else:
x = t / tau_m
y = tau_m / (2 * tau_ni)
s = tau_m * (tau_co - tau_ni) / (2 * tau_co * tau_ni)
int_A = get_int_A(x, y, s)
int_B = get_int_B(x, y, s)
L = Mni * np.exp(-x**2) * ( (epsilon_ni - epsilon_co) * int_A + epsilon_co * int_B )
Ls[i] = L
# plt.loglog(ts/24/3600, Ls)
Ls_modified = np.zeros(len(Ls))
ix = ts > 0
Ls_modified[ix] = Ls[ix]* (1. - np.exp(-1*(t0/ts[ix])**2) )
return Ls_modified
def arnett_lnlike(theta, t, Ldata, Ldata_unc):
"""
taum_, Mni_, texp_ are in the unit of day, Msun, day
"""
taum_, lgMni_, t0_, texp_ = theta
Mni_ = 10**lgMni_
model = model_arnett_modified(t, taum_, Mni_, t0_, texp_)
lgmodel = np.log10(model)
# not sure of the reason for the occasional
# ValueError: Probability function returned NaN
chi2_term = -1/2*np.sum((Ldata - lgmodel)**2/Ldata_unc**2)
error_term = np.sum(np.log(1/np.sqrt(2*np.pi*Ldata_unc**2)))
ln_l = chi2_term + error_term
return ln_l
arnett_nll = lambda *args: -arnett_lnlike(*args)
def arnett_lnprior(theta):
taum_, lgMni_, t0_, texp_ = theta
if ((1 < taum_ < 20) and (-4 < lgMni_ < 0) and (20 < t0_ < 100) and (-2.931 < texp_ < -2.891)):
return 0.
return -np.inf
def arnett_lnprob(theta, x, y, yerr):
lp = arnett_lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + arnett_lnlike(theta, x, y, yerr)
def plotChains(sampler, nburn, paramsNames, nplot):
Nparams = len(paramsNames)
fig, ax = plt.subplots(Nparams+1, 1, figsize = (8,2*(Nparams+1)), sharex = True)
fig.subplots_adjust(hspace = 0)
ax[0].set_title('Chains', fontsize=fs)
xplot = np.arange(sampler.get_chain().shape[0])
selected_walkers = np.random.choice(range(sampler.get_chain().shape[1]), nplot, replace=False)
for i,p in enumerate(paramsNames):
for w in selected_walkers:
burn = ax[i].plot(xplot[:nburn], sampler.get_chain()[:nburn,w,i],
alpha = 0.4, lw = 0.7, zorder = 1)
ax[i].plot(xplot[nburn:], sampler.get_chain(discard=nburn)[:,w,i],
color=burn[0].get_color(), alpha = 0.8, lw = 0.7, zorder = 1)
ax[i].set_ylabel(p)
if i==Nparams-1:
ax[i+1].plot(xplot[:nburn], sampler.get_log_prob()[:nburn,w],
color=burn[0].get_color(), alpha = 0.4, lw = 0.7, zorder = 1)
ax[i+1].plot(xplot[nburn:], sampler.get_log_prob(discard=nburn)[:,w],
color=burn[0].get_color(), alpha = 0.8, lw = 0.7, zorder = 1)
ax[i+1].set_ylabel('ln P')
return ax
def makeCornerArnett(sampler, nburn, paramsNames, quantiles=[0.16, 0.5, 0.84]):
samples = sampler.get_chain(discard=nburn, flat=True)
corner.corner(samples, labels = paramsNames, quantiles = quantiles,
range = [0.999, 0.999, 0.999, 0.999],
show_titles=True, plot_datapoints=False,
title_kwargs = {"fontsize": fs})
if __name__ == "__main__":
xyey = np.loadtxt('./Lbb_p20subtracted.txt')
tt = xyey[0]
lgL = xyey[1]
lgL_unc = xyey[2]
tgrid = np.linspace(1, 65)
taum_ = 6.35
Mni_ = 0.0162
t0_ = 24
texp = -2.9112151494264173
Lmodel2 = model_arnett_modified(tgrid, taum_ = taum_, Mni_ = Mni_, t0_ = t0_, texp = texp)
lgLmodel2 = np.log10(Lmodel2)
#
"""
plt.figure()
plt.errorbar(tt, lgL, lgL_unc, fmt=".k")
plt.plot(tgrid, lgLmodel2)
"""
nwalkers = 100
lgMni_ = np.log10(Mni_)
ml_guess = np.array([taum_, lgMni_, t0_, texp])
#initial position of walkers
ndim = len(ml_guess)
nfac = [1e-3]*ndim
pos = [ml_guess + nfac * np.random.randn(ndim) for i in range(nwalkers)]
max_samples = 10000
check_tau = 200
dirpath = "./arnettmodel/"
filename = dirpath + "sampler.h5"
if os.path.isfile(filename):
os.remove(filename)
backend = emcee.backends.HDFBackend(filename)
backend.reset(nwalkers, ndim)
with Pool(20) as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, arnett_lnprob,
args=(tt, lgL, lgL_unc),
pool=pool, backend=backend)
index = 0
autocorr = np.empty(max_samples)
old_tau = np.inf
for sample in sampler.sample(pos, iterations=max_samples, progress=True):
# Only check convergence every check_tau steps
if sampler.iteration % check_tau:
continue
tau = sampler.get_autocorr_time(tol=0)
autocorr[index] = np.mean(tau[:3]) # only expect the first three parameters to converge
index += 1
# Check convergence
converged = np.all(tau * 100 < sampler.iteration)
from dateutil.parser import parse as parse_datetime
from datetime import timezone
from datetime import timedelta
from datetime import datetime
import time
import pandas as pd
import numpy as np
import math
import csv
import sys
import os
import random
from sklearn.metrics import accuracy_score
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf_major_version = int(tf.__version__.split('.')[0])
# import tensorflow_addons as tfa
import smalltrain as st
import smalltrain.image
from smalltrain.data_set.img_data_set import IMGDataSet
from smalltrain.model.nn_model import NNModel
from smalltrain.model.one_dim_cnn_model import OneDimCNNModel
import ggutils.gif_util as gif_util
import ggutils.s3_access as s3_access
# MODEL_ID_4NN = '4NN_20180808' # 4 nn model 2019/09/10
# MODEL_ID_DNN = 'DNN' # 4 nn model 2019/09/10
# MODEL_ID_1D_CNN = '1D_CNN'
# MODEL_ID_CC = 'CC' # Carbon Copy
# MODEL_ID_LIST = [MODEL_ID_4NN, MODEL_ID_DNN, MODEL_ID_1D_CNN, MODEL_ID_CC]
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
class TwoDimCNNModel(OneDimCNNModel):
MODEL_ID_2D_CNN = '2D_CNN'
MODEL_ID = MODEL_ID_2D_CNN
def __init__(self):
return
# set class variables with hparams
# about ResNet
def set_hparams_on_res_net(self, hparams):
self.has_res_net = False
if hparams and 'has_res_net' in hparams.keys():
print('Use has_res_net in hparams:{}'.format(hparams['has_res_net']))
self.has_res_net = hparams['has_res_net']
else:
print('Use has_res_net with default value:{}'.format(self.has_res_net))
self.set_num_cnn_layers_in_res_block(hparams)
def set_num_cnn_layers_in_res_block(self, hparams):
DEFAULT_NUM_CNN_LAYERS_IN_RES_BLOCK = 2
MIN_NUM_CNN_LAYERS_IN_RES_BLOCK = 2
self.num_cnn_layers_in_res_block = DEFAULT_NUM_CNN_LAYERS_IN_RES_BLOCK
try:
if hparams and 'num_cnn_layers_in_res_block' in hparams.keys():
print('Use num_cnn_layers_in_res_block in hparams:{}'.format(hparams['num_cnn_layers_in_res_block']))
self.num_cnn_layers_in_res_block = int(hparams['num_cnn_layers_in_res_block'])
assert (self.num_cnn_layers_in_res_block >= MIN_NUM_CNN_LAYERS_IN_RES_BLOCK)
except (AssertionError, TypeError) as e:
self.num_cnn_layers_in_res_block = DEFAULT_NUM_CNN_LAYERS_IN_RES_BLOCK
print('Use num_cnn_layers_in_res_block with default value:{} because of error:{}'.format(
self.num_cnn_layers_in_res_block, e))
# about data augmentation
def set_flip_randomly_left_right(self, hparams):
self.flip_randomly_left_right = False
if hparams and 'flip_randomly_left_right' in hparams.keys():
print('Use flip_randomly_left_right in hparams:{}'.format(hparams['flip_randomly_left_right']))
self.flip_randomly_left_right = hparams['flip_randomly_left_right']
self.flip_randomly_left_right = bool(self.flip_randomly_left_right)
def set_crop_randomly_and_size(self, hparams):
self.crop_randomly = False
if hparams and 'crop_randomly' in hparams.keys():
print('Use crop_randomly in hparams:{}'.format(hparams['crop_randomly']))
self.crop_randomly = hparams['crop_randomly']
self.crop_randomly = bool(self.crop_randomly)
self.size_random_crop_from = None
if self.crop_randomly:
try:
if hparams and 'size_random_crop_from' in hparams.keys():
print('Use size_random_crop_from in hparams:{}'.format(hparams['size_random_crop_from']))
self.size_random_crop_from = float(hparams['size_random_crop_from'])
assert (self.size_random_crop_from >= self.input_img_width)
except (AssertionError, TypeError) as e:
self.size_random_crop_from = int(self.input_img_width * 1.25)
print('Use size_random_crop_from with default value:{} because of error:{}'.format(
self.size_random_crop_from, e))
def set_rotate(self, hparams):
self.rounding_angle = 90
if hparams and 'rounding_angle' in hparams.keys():
print('Use rounding_angle in hparams:{}'.format(hparams['rounding_angle']))
self.rounding_angle = hparams['rounding_angle']
self.rounding_angle = int(self.rounding_angle)
self.angle_rotate_randomly = None
if hparams and 'angle_rotate_randomly' in hparams.keys():
print('Use angle_rotate_randomly in hparams:{}'.format(hparams['angle_rotate_randomly']))
self.angle_rotate_randomly = hparams['angle_rotate_randomly']
self.angle_rotate_randomly = float(self.angle_rotate_randomly)
def set_resize_to_crop_with(self, hparams):
self.resize_to_crop_with = 'scaling_or_padding'
if hparams and 'resize_to_crop_with' in hparams.keys():
print('Use resize_to_crop_with in hparams:{}'.format(hparams['resize_to_crop_with']))
self.resize_to_crop_with = hparams['resize_to_crop_with']
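# Example hparams fragment (hypothetical values) consumed by the setters above:
#   {'has_res_net': True, 'num_cnn_layers_in_res_block': 2,
#    'flip_randomly_left_right': True, 'crop_randomly': True,
#    'size_random_crop_from': 40, 'rounding_angle': 90,
#    'angle_rotate_randomly': 15.0, 'resize_to_crop_with': 'scaling_or_padding'}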
def set_params(self, log_dir_path, model_id=None, train_data=None, debug_mode=True, prediction_mode=False, hparams=None):
PREFIX = '[TwoDimCNNModel]set_params'
print('{}__init__'.format(PREFIX))
self.debug_mode = debug_mode
# update by hparams
self.hparams = hparams
self.trainable_variables = None
self.model_type = 'CLASSIFICATION'
if hparams and 'model_type' in hparams.keys():
print('{}Use model_type in hparams:{}'.format(PREFIX, hparams['model_type']))
self.model_type = hparams['model_type']
else:
print('{}TODO Use model_type with default value:{}'.format(PREFIX, self.model_type))
self.prediction_mode = prediction_mode
# about optimizer
self.optimizer = 'AdamOptimizer' # Default Optimizer
if hparams and 'optimizer' in hparams.keys():
print('{}Use optimizer in hparams:{}'.format(PREFIX, hparams['optimizer']))
self.optimizer = hparams['optimizer']
if self.optimizer is None or self.optimizer not in NNModel.AVAILABLE_OPTIMIZER_LIST:
self.optimizer = NNModel.DEFAULT_OPTIMIZER
print('{}Use optimizer with default value:{}'.format(PREFIX, self.optimizer))
self.l1_norm = 0
# whether add l1_norm_reg or not
self.add_l1_norm_reg = False
if hparams and 'add_l1_norm_reg' in hparams.keys():
print('{}Use add_l1_norm_reg in hparams:{}'.format(PREFIX, hparams['add_l1_norm_reg']))
self.add_l1_norm_reg = hparams['add_l1_norm_reg']
if self.add_l1_norm_reg is None:
self.add_l1_norm_reg = False
# preactivation regularization
self.preactivation_regularization_value = 0.0
self.add_preactivation_regularization = False
if hparams and 'add_preactivation_regularization' in hparams.keys():
print('{}Use add_preactivation_regularization in hparams:{}'.format(PREFIX, hparams['add_preactivation_regularization']))
self.add_preactivation_regularization = hparams['add_preactivation_regularization']
if self.add_preactivation_regularization is None:
self.add_preactivation_regularization = False
self.preactivation_regularization_value_ratio = 0.0
if hparams and 'preactivation_regularization_value_ratio' in hparams.keys():
print('{}Use preactivation_regularization_value_ratio in hparams:{}'.format(PREFIX, hparams['preactivation_regularization_value_ratio']))
self.preactivation_regularization_value_ratio = hparams['preactivation_regularization_value_ratio']
try:
self.preactivation_regularization_value_ratio = np.float32(self.preactivation_regularization_value_ratio)
except ValueError:
self.preactivation_regularization_value_ratio = 0.0
print('{}Use preactivation_regularization_value_ratio with default value:{}'.format(PREFIX, self.preactivation_regularization_value_ratio))
else:
print('{}Use preactivation_regularization_value_ratio with default value:{}'.format(PREFIX, self.preactivation_regularization_value_ratio))
# self.preactivation_maxout_list = [300.0, 200.0, 54.0, 18.0, 6.0, 18.0, 54.0, 200.0, 300.0, 300.0, 300.0]
self.preactivation_maxout_list = None
if hparams and 'preactivation_maxout_list' in hparams.keys():
print('{}Use preactivation_maxout_list in hparams:{}'.format(PREFIX, hparams['preactivation_maxout_list']))
self.preactivation_maxout_list = hparams['preactivation_maxout_list']
try:
assert len(self.preactivation_maxout_list) > 0
except (AssertionError, TypeError):
self.preactivation_maxout_list = None
print('{}Use preactivation_maxout_list with default value:{}'.format(PREFIX, self.preactivation_maxout_list))
else:
print('{}Use preactivation_maxout_list with default value:{}'.format(PREFIX, self.preactivation_maxout_list))
self.train_data = train_data
# Set col_size from
# 1. hparams.get('col_size')
# 2. data_set.col_size
self.col_size = hparams.get('col_size')
if self.col_size is None:
try:
self.col_size = self.data_set.col_size
except AttributeError:
self.col_size = None
if hparams and 'monochrome_mode' in hparams.keys():
print('{}Use monochrome_mode in hparams:{}'.format(PREFIX, hparams['monochrome_mode']))
self.monochrome_mode = hparams['monochrome_mode']
else:
print('{}TODO Use monochrome_mode with default value'.format(PREFIX))
self.monochrome_mode = False
# Ensure that self.col_size = 1 if Monochrome mode
if self.monochrome_mode:
self.col_size = 1
# update by hparams
self.input_img_width = 32
if hparams and 'input_img_width' in hparams.keys():
print('{}Use input_img_width in hparams:{}'.format(PREFIX, hparams['input_img_width']))
self.input_img_width = hparams['input_img_width']
else:
print('{}TODO Use input_img_width with default value'.format(PREFIX))
self.input_width = self.input_img_width
if hparams and 'n_layer' in hparams.keys():
print('{}Use n_layer in hparams:{}'.format(PREFIX, hparams['n_layer']))
self.n_layer = hparams['n_layer']
else:
print('{}TODO Use n_layer with default value'.format(PREFIX))
self.filter_width = 5
if hparams and 'filter_width' in hparams.keys():
print('{}Use filter_width in hparams:{}'.format(PREFIX, hparams['filter_width']))
self.filter_width = hparams['filter_width']
else:
print('{}Use filter_width with default value:{}'.format(PREFIX, self.filter_width))
self.cnn_channel_size = 4
if hparams and 'cnn_channel_size' in hparams.keys():
print('{}Use cnn_channel_size in hparams:{}'.format(PREFIX, hparams['cnn_channel_size']))
self.cnn_channel_size = hparams['cnn_channel_size']
else:
print('{}TODO Use cnn_channel_size with default value'.format(PREFIX))
self.cnn_channel_size_list = None
if hparams and 'cnn_channel_size_list' in hparams.keys():
print('{}Use cnn_channel_size_list in hparams:{}'.format(PREFIX, hparams['cnn_channel_size_list']))
self.cnn_channel_size_list = hparams['cnn_channel_size_list']
else:
print('{}Use cnn_channel_size with default value:{}'.format(PREFIX, self.cnn_channel_size_list))
self.pool_size_list = None
if hparams and 'pool_size_list' in hparams.keys():
print('{}Use pool_size_list in hparams:{}'.format(PREFIX, hparams['pool_size_list']))
self.pool_size_list = hparams['pool_size_list']
if self.pool_size_list is None:
self.pool_size_list = np.ones([self.n_layer], dtype="int32")
self.pool_size_list[0:1] = 2
print('{}Use pool_size_list with default value:{}'.format(PREFIX, self.pool_size_list))
self.act_func_list = None
if hparams and 'act_func_list' in hparams.keys():
print('{}Use act_func_list in hparams:{}'.format(PREFIX, hparams['act_func_list']))
self.act_func_list = hparams['act_func_list']
if self.act_func_list is None:
self.act_func_list = np.repeat(NNModel.DEFAULT_ACT_FUNC_KEY, [self.n_layer - 1])
print('{}Use act_func_list with default value:{}'.format(PREFIX, self.act_func_list))
self.act_func_ref_list = self.set_act_func_ref_list(self.act_func_list, self.n_layer)
print('{}act_func_ref_list is set :{}'.format(PREFIX, self.act_func_ref_list))
self.cnn_weight_stddev_list = None
default_cnn_weight_stddev_list = [NNModel.DEFAULT_WEIGHT_STDDEV] * self.n_layer
if hparams and 'cnn_weight_stddev_list' in hparams.keys():
print('{}Use cnn_weight_stddev_list in hparams:{}'.format(PREFIX, hparams['cnn_weight_stddev_list']))
self.cnn_weight_stddev_list = hparams['cnn_weight_stddev_list']
try:
assert len(self.cnn_weight_stddev_list) > 0
self.cnn_weight_stddev_list.extend(default_cnn_weight_stddev_list)
self.cnn_weight_stddev_list = self.cnn_weight_stddev_list[:self.n_layer]
except (AssertionError, ValueError, TypeError) as e:
self.cnn_weight_stddev_list = default_cnn_weight_stddev_list.copy()
print('{}cnn_weight_stddev_list is set: {}'.format(PREFIX, self.cnn_weight_stddev_list))
self.cnn_bias_value_list = None
default_cnn_bias_value_list = [NNModel.DEFAULT_BIAS_VALUE] * self.n_layer
if hparams and 'cnn_bias_value_list' in hparams.keys():
print('{}Use cnn_bias_value_list in hparams:{}'.format(PREFIX, hparams['cnn_bias_value_list']))
self.cnn_bias_value_list = hparams['cnn_bias_value_list']
try:
assert len(self.cnn_bias_value_list) > 0
self.cnn_bias_value_list.extend(default_cnn_bias_value_list)
self.cnn_bias_value_list = self.cnn_bias_value_list[:self.n_layer]
except (AssertionError, ValueError, TypeError) as e:
self.cnn_bias_value_list = default_cnn_bias_value_list.copy()
print('{}cnn_bias_value_list is set: {}'.format(PREFIX, self.cnn_bias_value_list))
self.num_add_fc_layers = 0
if hparams and 'num_add_fc_layers' in hparams.keys():
print('{}Use num_add_fc_layers in hparams:{}'.format(PREFIX, hparams['num_add_fc_layers']))
self.num_add_fc_layers = hparams['num_add_fc_layers']
else:
print('{}Use num_add_fc_layers with default value:{}'.format(PREFIX, self.num_add_fc_layers))
self.fc_node_size_list = None
if hparams and 'fc_node_size_list' in hparams.keys():
print('{}Use fc_node_size_list in hparams:{}'.format(PREFIX, hparams['fc_node_size_list']))
self.fc_node_size_list = hparams['fc_node_size_list']
if self.num_add_fc_layers > 0:
_default_list = [128] * self.num_add_fc_layers
if self.fc_node_size_list is not None:
self.fc_node_size_list.extend(_default_list)
self.fc_node_size_list = self.fc_node_size_list[:self.num_add_fc_layers]
print('{}fc_node_size_list is set: {}'.format(PREFIX, self.fc_node_size_list))
self.fc_weight_stddev_list = None
if hparams and 'fc_weight_stddev_list' in hparams.keys():
print('{}Use fc_weight_stddev_list in hparams:{}'.format(PREFIX, hparams['fc_weight_stddev_list']))
self.fc_weight_stddev_list = hparams['fc_weight_stddev_list']
if self.num_add_fc_layers > 0:
_default_list = [NNModel.DEFAULT_WEIGHT_STDDEV] * (1 + self.num_add_fc_layers)
if self.fc_weight_stddev_list is not None:
self.fc_weight_stddev_list.extend(_default_list)
self.fc_weight_stddev_list = self.fc_weight_stddev_list[:(1 + self.num_add_fc_layers)]
else:
self.fc_weight_stddev_list = _default_list.copy()
print('{}fc_weight_stddev_list is set: {}'.format(PREFIX, self.fc_weight_stddev_list))
self.fc_bias_value_list = None
if hparams and 'fc_bias_value_list' in hparams.keys():
print('{}Use fc_bias_value_list in hparams:{}'.format(PREFIX, hparams['fc_bias_value_list']))
self.fc_bias_value_list = hparams['fc_bias_value_list']
if self.num_add_fc_layers > 0:
_default_list = [NNModel.DEFAULT_BIAS_VALUE] * (1 + self.num_add_fc_layers)
if self.fc_bias_value_list is not None:
self.fc_bias_value_list.extend(_default_list)
self.fc_bias_value_list = self.fc_bias_value_list[:(1 + self.num_add_fc_layers)]
else:
self.fc_bias_value_list = _default_list.copy()
print('{}fc_bias_value_list is set: {}'.format(PREFIX, self.fc_bias_value_list))
# About minibatch operation
self.set_evaluate_in_minibatch(hparams)
# About sub model
self.set_hparams_on_sub_model(hparams)
# about ResNet
self.set_hparams_on_res_net(hparams)
# about data augmentation
self.set_flip_randomly_left_right(hparams)
self.set_crop_randomly_and_size(hparams)
self.set_rotate(hparams)
self.set_resize_to_crop_with(hparams)
# Abount ONNX export
self.set_export_to_onnx(hparams)
self.test_only_mode = False
if hparams and 'test_only_mode' in hparams.keys():
print('{}Use test_only_mode in hparams:{}'.format(PREFIX, hparams['test_only_mode']))
self.test_only_mode = hparams['test_only_mode']
else:
print('{}TODO Use test_only_mode with default value:{}'.format(PREFIX, self.test_only_mode))
# about min-max normalization
self.has_minmax_norm = False
if hparams and 'has_minmax_norm' in hparams.keys():
print('{}Use has_minmax_norm in hparams:{}'.format(PREFIX, hparams['has_minmax_norm']))
self.has_minmax_norm = hparams['has_minmax_norm']
else:
print('{}Use has_minmax_norm with default value:{}'.format(PREFIX, self.has_minmax_norm))
if self.has_minmax_norm:
self.input_min = None
try:
if hparams and 'input_min' in hparams.keys():
print('{}Use input_min in hparams:{}'.format(PREFIX, hparams['input_min']))
self.input_min = float(hparams['input_min'])
except (TypeError, ValueError) as e:
self.input_min = None
print('{}Use input_min from input data'.format(PREFIX))
self.input_max = None
try:
if hparams and 'input_max' in hparams.keys():
print('{}Use input_max in hparams:{}'.format(PREFIX, hparams['input_max']))
self.input_max = float(hparams['input_max'])
except (TypeError, ValueError) as e:
self.input_max = None
print('{}Use input_max from input data'.format(PREFIX))
# about batch normalization
self.has_batch_norm = True
if hparams and 'has_batch_norm' in hparams.keys():
print('{}Use has_batch_norm in hparams:{}'.format(PREFIX, hparams['has_batch_norm']))
self.has_batch_norm = hparams['has_batch_norm']
else:
print('{}TODO Use has_batch_norm with default value:{}'.format(PREFIX, self.has_batch_norm))
if self.has_batch_norm:
self.bn_decay = NNModel.DEFAULT_BN_DECAY
if hparams and 'bn_decay' in hparams.keys():
print('{}Use bn_decay in hparams:{}'.format(PREFIX, hparams['bn_decay']))
self.bn_decay = hparams['bn_decay']
else:
print('{}TODO Use bn_decay with default value:{}'.format(PREFIX, self.bn_decay))
self.bn_eps = NNModel.DEFAULT_BN_ESP
if hparams and 'bn_eps' in hparams.keys():
print('{}Use bn_eps in hparams:{}'.format(PREFIX, hparams['bn_eps']))
self.bn_eps = hparams['bn_eps']
else:
print('{}TODO Use bn_eps with default value:{}'.format(PREFIX, self.bn_eps))
self.annotation_col_names = None
if hparams and 'annotation_col_names' in hparams.keys():
print('{}Use annotation_col_names in hparams:{}'.format(PREFIX, hparams['annotation_col_names']))
self.annotation_col_names = hparams['annotation_col_names']
self.annotation_col_size = 0
if self.annotation_col_names is not None:
self.annotation_col_size = len(self.annotation_col_names)
# about mask_rate
self.mask_rate = None
if hparams and 'mask_rate' in hparams.keys():
print('{}Use mask_rate in hparams:{}'.format(PREFIX, hparams['mask_rate']))
self.mask_rate = hparams['mask_rate']
if self.mask_rate is not None:
try:
self.mask_rate = float(self.mask_rate)
except ValueError:
print('{}mask_rate is not float type. reset with None'.format(PREFIX))
self.mask_rate = None
# output_data_names
if hparams and 'output_data_names' in hparams.keys():
print('{}Use output_data_names in hparams:{}'.format(PREFIX, hparams['output_data_names']))
self.output_data_names = hparams['output_data_names']
if self.output_data_names is not None:
try:
if not isinstance(self.output_data_names, list):
raise ValueError
print('output_data_names size:{}'.format(len(self.output_data_names)))
except ValueError:
print('{}output_data_names is not list type. reset with None'.format(PREFIX))
self.output_data_names = None
self.restore_var_name_list = None
if hparams and 'restore_var_name_list' in hparams.keys():
print('{}Use restore_var_name_list in hparams:{}'.format(PREFIX, hparams['restore_var_name_list']))
self.restore_var_name_list = hparams['restore_var_name_list']
self.untrainable_var_name_list = None
if hparams and 'untrainable_var_name_list' in hparams.keys():
print('{}Use untrainable_var_name_list in hparams:{}'.format(PREFIX, hparams['untrainable_var_name_list']))
self.untrainable_var_name_list = hparams['untrainable_var_name_list']
# plot settings
self.plot_x_label = None
if hparams and 'plot_x_label' in hparams.keys():
print('{}Use plot_x_label in hparams:{}'.format(PREFIX, hparams['plot_x_label']))
self.plot_x_label = hparams['plot_x_label']
self.plot_y_label = None
if hparams and 'plot_y_label' in hparams.keys():
print('{}Use plot_y_label in hparams:{}'.format(PREFIX, hparams['plot_y_label']))
self.plot_y_label = hparams['plot_y_label']
self.plot_x_data_name_in_annotation = None
if hparams and 'plot_x_data_name_in_annotation' in hparams.keys():
print('{}Use plot_x_data_name_in_annotation in hparams:{}'.format(PREFIX, hparams['plot_x_data_name_in_annotation']))
self.plot_x_data_name_in_annotation = hparams['plot_x_data_name_in_annotation']
self.plot_group_data_name_in_annotation = None
if hparams and 'plot_group_data_name_in_annotation' in hparams.keys():
print('{}Use plot_group_data_name_in_annotation in hparams:{}'.format(PREFIX, hparams['plot_group_data_name_in_annotation']))
self.plot_group_data_name_in_annotation = hparams['plot_group_data_name_in_annotation']
self.plot_x_range = None
if hparams and 'plot_x_range' in hparams.keys():
print('{}Use plot_x_range in hparams:{}'.format(PREFIX, hparams['plot_x_range']))
self.plot_x_range = hparams['plot_x_range']
self.plot_y_range = None
if hparams and 'plot_y_range' in hparams.keys():
print('{}Use plot_y_range in hparams:{}'.format(PREFIX, hparams['plot_y_range']))
self.plot_y_range = hparams['plot_y_range']
self.plot_title = None
if hparams and 'plot_title' in hparams.keys():
print('{}Use plot_title in hparams:{}'.format(PREFIX, hparams['plot_title']))
self.plot_title = hparams['plot_title']
self.plot_errors = None
if hparams and 'plot_errors' in hparams.keys():
print('{}Use plot_errors in hparams:{}'.format(PREFIX, hparams['plot_errors']))
self.plot_errors = hparams['plot_errors']
self.plot_animation = False
if hparams and 'plot_animation' in hparams.keys():
print('{}Use plot_animation in hparams:{}'.format(PREFIX, hparams['plot_animation']))
self.plot_animation = hparams['plot_animation']
if self.plot_animation is None:
self.plot_animation = False
print('{}Use plot_animation with default value:{}'.format(PREFIX, self.plot_animation))
self.calc_cc_errors = False
if hparams and 'calc_cc_errors' in hparams.keys():
print('{}Use calc_cc_errors in hparams:{}'.format(PREFIX, hparams['calc_cc_errors']))
self.calc_cc_errors = hparams['calc_cc_errors']
if self.calc_cc_errors is None:
self.calc_cc_errors = False
print('{}Use calc_cc_errors with default value:{}'.format(PREFIX, self.calc_cc_errors))
self.op_errors = None
if hparams and 'op_errors' in hparams.keys():
print('{}Use op_errors in hparams:{}'.format(PREFIX, hparams['op_errors']))
self.op_errors = hparams['op_errors']
# rank_boundary_list
self.rank_boundary_list = None
if hparams and 'rank_boundary_list' in hparams.keys():
print('{}Use rank_boundary_list in hparams:{}'.format(PREFIX, hparams['rank_boundary_list']))
self.rank_boundary_list = hparams['rank_boundary_list']
if self.rank_boundary_list is not None:
# check the members of rank_boundary_list
len_of_rank_boundary_list = len(self.rank_boundary_list)
if len_of_rank_boundary_list < 1:
self.rank_boundary_list = None
for rank_boundary in self.rank_boundary_list:
try:
assert len(rank_boundary) > 1
lower = rank_boundary[0]
upper = rank_boundary[1]
print('{}rank_boundary lower:{}, func:{}'.format(PREFIX, lower, upper))
except Exception as e:
print('{}No rank_boundary_list is set because of error {} on invalid parameter:{}'.format(PREFIX, e, rank_boundary))
else:
print('{}No rank_boundary_list is set'.format(PREFIX))
# cloud settings
self.cloud_root = None
if hparams and 'cloud_root' in hparams.keys():
print('{}Use cloud_root in hparams:{}'.format(PREFIX, hparams['cloud_root']))
self.cloud_root = hparams['cloud_root']
self.prioritize_cloud = False
if hparams and 'prioritize_cloud' in hparams.keys():
print('{}Use prioritize_cloud in hparams:{}'.format(PREFIX, hparams['prioritize_cloud']))
self.prioritize_cloud = hparams['prioritize_cloud']
if self.prioritize_cloud is None:
self.prioritize_cloud = False
print('{}Use prioritize_cloud with default value:{}'.format(PREFIX, self.prioritize_cloud))
# local setting
self.save_root_dir = '/var/tensorflow/tsp/'
if hparams and 'save_root_dir' in hparams.keys():
print('{}Use save_root_dir in hparams:{}'.format(PREFIX, hparams['save_root_dir']))
self.save_root_dir = hparams['save_root_dir']
else:
print('{}TODO Use save_root_dir with default value'.format(PREFIX))
self.test_report_frequency = 100
if hparams and 'test_report_frequency' in hparams.keys():
print('{}Use test_report_frequency in hparams:{}'.format(PREFIX, hparams['test_report_frequency']))
self.test_report_frequency = hparams['test_report_frequency']
try:
self.test_report_frequency = int(self.test_report_frequency)
except (ValueError, TypeError) as e:
self.test_report_frequency = 100
print('{}Use test_report_frequency with default value:{} because of error:{}'.format(PREFIX, self.test_report_frequency, e))
self.train_report_frequency = 100
if hparams and 'train_report_frequency' in hparams.keys():
print('{}Use train_report_frequency in hparams:{}'.format(PREFIX, hparams['train_report_frequency']))
self.train_report_frequency = hparams['train_report_frequency']
try:
self.train_report_frequency = int(self.train_report_frequency)
except (ValueError, TypeError) as e:
self.train_report_frequency = 100
print('{}Use train_report_frequency with default value:{} because of error:{}'.format(PREFIX, self.train_report_frequency, e))
self.save_model_frequency = 100
if hparams and 'save_model_frequency' in hparams.keys():
print('{}Use save_model_frequency in hparams:{}'.format(PREFIX, hparams['save_model_frequency']))
self.save_model_frequency = hparams['save_model_frequency']
try:
self.save_model_frequency = int(self.save_model_frequency)
except (ValueError, TypeError) as e:
self.save_model_frequency = 100
print('{}Use save_model_frequency with default value:{} because of error:{}'.format(PREFIX, self.save_model_frequency, e))
self.summarize_layer_frequency = 1000
if hparams and 'summarize_layer_frequency' in hparams.keys():
print('{}Use summarize_layer_frequency in hparams:{}'.format(PREFIX, hparams['summarize_layer_frequency']))
self.summarize_layer_frequency = hparams['summarize_layer_frequency']
try:
self.summarize_layer_frequency = int(self.summarize_layer_frequency)
except (ValueError, TypeError) as e:
self.summarize_layer_frequency = 1000
print('{}Use summarize_layer_frequency with default value:{} because of error:{}'.format(PREFIX, self.summarize_layer_frequency, e))
self.summarize_layer_name_list = None
if hparams and 'summarize_layer_name_list' in hparams.keys():
print('{}Use summarize_layer_name_list in hparams:{}'.format(PREFIX, hparams['summarize_layer_name_list']))
self.summarize_layer_name_list = hparams['summarize_layer_name_list']
if self.summarize_layer_name_list is not None:
try:
assert len(self.summarize_layer_name_list) > 0
for _summarize_layer in self.summarize_layer_name_list:
assert len(_summarize_layer) > 0
except AssertionError as e:
self.summarize_layer_name_list = None
print('{}Use summarize_layer_name_list with default value:{} because of error:{}'.format(PREFIX,
self.summarize_layer_name_list,
e))
self.summarize_layer_op_obj_list = []
# check init model
self.sess = tf.InteractiveSession()
self.init_model_path = None
if hparams and 'init_model_path' in hparams.keys():
print('{}Use init_model_path in hparams:{}'.format(PREFIX, hparams['init_model_path']))
self.init_model_path = hparams['init_model_path']
# set output_classes in CLASSIFICATION model
self.output_classes = None
if hparams and 'output_classes' in hparams.keys():
print('{}Use output_classes in hparams:{}'.format(PREFIX, hparams['output_classes']))
self.output_classes = hparams['output_classes']
# if output_classes is not set in CLASSIFICATION model, try to read from init_model_path
if self.output_classes is None and self.init_model_path is not None and self.model_type == 'CLASSIFICATION':
self.output_classes = self.get_output_classes_from_model(self.init_model_path)
hparams['output_classes'] = self.output_classes
self.log_dir_path = log_dir_path
self.result_sum = []
return
def auto_set_model_parameter(self):
print('TODO auto_set_model_parameter')
self.can_not_generate_input_output_data = None
self.generate_data_set()
self.input_width = self.data_set.input_img_width
self.col_size = self.data_set.col_size
# Set output_classes if not given
if self.output_classes is None:
self.output_classes = self.data_set.output_classes
# info_dim_size_list = []
print('DONE auto_set_model_parameter')
return True
def generate_data_set(self):
self.data_set = IMGDataSet(debug_mode=self.debug_mode, prediction_mode=self.prediction_mode, hparams=self.hparams)
self.data_set.generate_input_output_data()
def get_output_classes_from_model(self, init_model_path):
from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
print('[get_output_classes_from_model]Restore from init_model_path:{}'.format(init_model_path))
local_init_model_path = init_model_path
if self.prioritize_cloud:
# download from S3 if the "init_model_path" is S3 path
if is_s3_path(init_model_path):
_paths, _global_iter_got_from_path = get_tf_model_file_paths(init_model_path)
for _path in _paths:
local_init_model_path = download_to_local(path=_path, work_dir_path='/var/tmp/tsp/')
local_init_model_path = local_init_model_path.split('.ckpt')[0] + '.ckpt'
if _global_iter_got_from_path is not None:
local_init_model_path = local_init_model_path + '-' + str(_global_iter_got_from_path)
else:
print('[get_output_classes_from_model]Check local:{}'.format(init_model_path))
print('[get_output_classes_from_model]Check local_init_model_path:{}'.format(local_init_model_path))
if local_init_model_path is None or len(local_init_model_path) < 1 or os.path.isfile(local_init_model_path):
print('[get_output_classes_from_model]local_init_model_path is empty. output_classes set None')
self.output_classes = None
return None
meta_file_path = '{}.meta'.format(local_init_model_path)
_saver = tf.train.import_meta_graph(meta_file_path)
_saver.restore(self.sess, local_init_model_path)
# get output_classes from last layer b_fc shape
_variables = tf.get_default_graph().get_collection_ref(tf.GraphKeys.VARIABLES)
print(_variables)
try:
bias_before_output_layer_name = 'model/fc/b_fc_last/b_fc_last:0'
b_fc_last = tf.get_default_graph().get_tensor_by_name(bias_before_output_layer_name)
except KeyError as e:
# For compatibility with <=v0.1.1 (the only fc layer name is fixed to fc2)
bias_before_output_layer_name = 'model/fc/b_fc2/b_fc2:0'
b_fc_last = tf.get_default_graph().get_tensor_by_name(bias_before_output_layer_name)
# Reset the graph to restore after model construction
tf.reset_default_graph()
self.output_classes = int(b_fc_last.shape[0]) # have to cast from string to integer
return self.output_classes
def train(self, iter_to=10000, learning_rate=1e-4, batch_size=128, dropout_ratio=0.5, l1_norm_reg_ratio=0.0, save_file_path=None, report_dir_path=None):
from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
last_time = time.time()
print('train with iter_to:{}, batch_size:{}, dropout_ratio:{}'.format(iter_to, batch_size, dropout_ratio))
# TODO
train_index = 0
# input_data = self.data_set.input_data
# output_data = self.data_set.output_data
# train_index_list = self.data_set.train_index_list
# test_index_list = self.data_set.test_index_list
# test_size = 31 + 30 # 2015/9, 10
# test_size = int(len(output_data) * 0.1)
# setup each test data
# _input_data = self.data_set.input_data
test_data = self.data_set.get_test_input_data()
if (self.mask_rate is not None) and self.mask_rate > 0:
# masked_test_data = self.data_set.masked_input_data[test_index_list].astype(np.float32)
masked_test_data = self.data_set.get_masked_test_input_data()
if self.monochrome_mode:
print(test_data.shape)
if test_data.shape[3] == 3:
monochrome_test_data = np.zeros((test_data.shape[0], test_data.shape[1], test_data.shape[2], 1), dtype=int)
# print('monochrome_test_data.shape: {}'.format(monochrome_test_data.shape))
_size = len(test_data)
for i in range(test_data.shape[0]):
binarized_img = self.data_set.binarize_img(test_data[i])
# print('binarized_img.shape: {}'.format(binarized_img.shape))
monochrome_test_data[i,:,:,0] = binarized_img
# if i % 100 == 0:
# print('DONE binarize_img {}/{}'.format(i, _size))
test_data = monochrome_test_data
# test_values = np.asarray(output_data[test_index_list], dtype=np.float32)
test_values = self.data_set.get_test_output_data()
if self.model_type == 'CLASSIFICATION':
test_values_laveled = np.argmax(test_values, axis=1)
else:
# test_values = test_values.reshape(-1) # TODO
raise Exception('only classification model type is available.')
test_labels = np.argmax(test_values, axis=1)
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from libsvm.svmutil import *
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from timeit import default_timer as timer
#Reading files
data_points_train = pd.read_csv('2019MT60763.csv', header = None, nrows = 3000)
data = np.array((data_points_train.sort_values(data_points_train.columns[25])).values)
dp = np.array(data)
class_label = dp[:,25]
# counting no of occurence of labels of each class
unique, counts = np.unique(class_label, return_counts=True)
dict(zip(unique, counts))
#print(counts)
# for 25 features
# FOR CLASSES {0,1}
text_x = dp[:631,:25]
text_t = dp[:631,25]
# for cross_validation
tp_x_1 = np.append(dp[:100,:25],dp[306:406,:25],axis=0)
tp_t_1 = np.append(dp[:100,25],dp[306:406,25],axis=0)
tp_x_2 = np.append(dp[101:201,:25],dp[407:507,:25],axis=0)
tp_t_2 = np.append(dp[101:201,25],dp[407:507,25],axis=0)
tp_x_3 = np.append(dp[202:305,:25],dp[508:631,:25],axis=0)
tp_t_3 = np.append(dp[202:305,25],dp[508:631,25],axis=0)
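# Manual 3-fold cross-validation: the model is fit on each fold in turn and
# scored on the other two, and the six resulting scores are averaged below.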
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='linear'))])
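# Keeping StandardScaler inside the Pipeline means it is re-fit on each CV
# training split, so no scaling statistics leak into the validation folds.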
parameters = {'SVM__C':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
x = G.score(tp_x_2, tp_t_2)
x+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
x+=G.score(tp_x_3, tp_t_3)
x+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
x+=G.score(tp_x_2, tp_t_2)
x+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',x/6)
print(((svm.SVC(kernel = 'linear', C = 1)).fit(text_x,text_t)).support_)
fig = plt.figure(1)
c = np.logspace(0, 1, 10)
matrix = np.zeros((10,3))
for i in range (10):
svc = svm.SVC(kernel='linear',C = c[i])
svc.fit(text_x, text_t)
matrix[i][0] = i
matrix[i][1] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
x1 = svc.score(tp_x_2, tp_t_2)
x1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
x1+=svc.score(tp_x_3, tp_t_3)
x1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
x1+=svc.score(tp_x_2, tp_t_2)
x1+=svc.score(tp_x_1, tp_t_1)
matrix[i][2] = x1/6
plt.plot(matrix[:,0:1],matrix[:,1:2],label = 'cross_validation score')
plt.plot(matrix[:,0:1],matrix[:,2:3],label = 'Training score')
plt.title('C vs Accuracy')
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.xscale('log')
plt.legend()
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='rbf'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
y = G.score(tp_x_2, tp_t_2)
y+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
y+=G.score(tp_x_3, tp_t_3)
y+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
y+=G.score(tp_x_2, tp_t_2)
y+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',y/6)
print(((svm.SVC(kernel = 'rbf', C = 1.29,gamma = 1)).fit(text_x,text_t)).support_)
puto = np.zeros((100,1))
luto = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='rbf',C = c[i],gamma = g[j])
svc.fit(text_x, text_t)
puto[10*i+j][0] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
y1 = svc.score(tp_x_2, tp_t_2)
y1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
y1+=svc.score(tp_x_3, tp_t_3)
y1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
y1+=svc.score(tp_x_2, tp_t_2)
y1+=svc.score(tp_x_1, tp_t_1)
luto[10*i+j][0] = y1/6
g, c = np.meshgrid(g, c)
graph = np.ravel(puto)
patrix = np.ravel(luto)
patrix = patrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, patrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (cross-validation)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
graph = graph.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, graph)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (training)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
start = timer()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='poly'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10),'SVM__degree':[1,5]}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
z = G.score(tp_x_2, tp_t_2)
z+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
z+=G.score(tp_x_3, tp_t_3)
z+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
z+=G.score(tp_x_2, tp_t_2)
z+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',z/6)
end = timer()
print('TIME',end - start)
print(((svm.SVC(kernel = 'poly', C = 1,gamma = 1,degree = 1)).fit(text_x,text_t)).support_)
suto = np.zeros((100,1))
nuto = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='poly',C = c[i],gamma = g[j],degree = 1)
svc.fit(text_x, text_t)
suto[10*i+j][0] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
z1 = svc.score(tp_x_2, tp_t_2)
z1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
z1+=svc.score(tp_x_3, tp_t_3)
z1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
z1+=svc.score(tp_x_2, tp_t_2)
z1+=svc.score(tp_x_1, tp_t_1)
nuto[10*i+j][0] = z1/6
g, c = np.meshgrid(g, c)
trix = np.ravel(suto)
prix = np.ravel(nuto)
prix = prix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, prix)
cbar = fig.colorbar(k)
plt.xlabel('C')
plt.ylabel('gamma')
plt.title('Contour plot for Accuracy v/s C and gamma (cross-validation)')
plt.xscale('log')
plt.yscale('log')
plt.show()
# training
trix = trix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, trix)
cbar = fig.colorbar(k)
plt.xlabel('C')
plt.ylabel('gamma')
plt.title('Contour plot for Accuracy v/s C and gamma (training)')
plt.xscale('log')
plt.yscale('log')
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='sigmoid'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
f = G.score(tp_x_2, tp_t_2)
f+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
f+=G.score(tp_x_3, tp_t_3)
f+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
f+=G.score(tp_x_2, tp_t_2)
f+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',f/6)
print(((svm.SVC(kernel = 'sigmoid', C = 10,gamma = 1)).fit(text_x,text_t)).support_)
jito = np.zeros((100,1))
kito = np.zeros((100,1))
c = np.logspace(0, 1, 10)
g = np.logspace(0, 1, 10)
for i in range (10):
for j in range(10):
svc = svm.SVC(kernel='sigmoid',C = c[i],gamma = g[j])
svc.fit(text_x, text_t)
jito[10*i+j][0] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
f1 = svc.score(tp_x_2, tp_t_2)
f1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
f1+=svc.score(tp_x_3, tp_t_3)
f1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
f1+=svc.score(tp_x_2, tp_t_2)
f1+=svc.score(tp_x_1, tp_t_1)
kito[10*i+j][0] = f1/6
g, c = np.meshgrid(g, c)
tatrix = np.ravel(jito)
katrix = np.ravel(kito)
katrix = katrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, katrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (cross-validation)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
tatrix = tatrix.reshape(c.shape)
fig, p = plt.subplots()
k = p.contourf(c, g, tatrix)
cbar = fig.colorbar(k)
plt.title('Accuracy v/s C and gamma (training)')
plt.xlabel('C')
plt.ylabel('gamma')
plt.xscale('log')
plt.yscale('log')
plt.show()
# In[5]:
# FOR CLASSES {2,3}
text_x_2 = (dp[632:1230,:25])
text_t_2 = (dp[632:1230,25])
# for cross_validation
tp_x_1 = np.append(dp[632:732,:25],dp[943:1043,:25],axis=0)
tp_t_1 = np.append(dp[632:732,25],dp[943:1043,25],axis=0)
tp_x_2 = np.append(dp[732:832,:25],dp[1043:1143,:25],axis=0)
tp_t_2 = np.append(dp[732:832,25],dp[1043:1143,25],axis=0)
tp_x_3 = np.append(dp[832:942,:25],dp[1143:1230,:25],axis=0)
tp_t_3 = np.append(dp[832:942,25],dp[1143:1230,25],axis=0)
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='linear'))])
parameters = {'SVM__C':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x_2, text_t_2)
print ('Training score',G.score(text_x_2, text_t_2))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
l1 = G.score(tp_x_2, tp_t_2)
l1+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
l1+=G.score(tp_x_3, tp_t_3)
l1+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
l1+=G.score(tp_x_2, tp_t_2)
l1+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',l1/6)
print(((svm.SVC(kernel = 'linear', C = 7.74)).fit(text_x_2,text_t_2)).support_)
fig = plt.figure(2)
c = np.logspace(0, 1, 10)
'''
------------------------------------------------------------------------
Functions for generating demographic objects necessary for the OG-USA
model
This module defines the following function(s):
get_fert()
get_mort()
pop_rebin()
get_imm_resid()
immsolve()
get_pop_objs()
------------------------------------------------------------------------
'''
import os
import pickle
import numpy as np
import pandas as pd
import scipy.optimize as opt
import scipy.interpolate as si
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import parameter_plots as pp
# create output directory for figures
CUR_PATH = os.path.split(os.path.abspath(__file__))[0]
OUTPUT_DIR = os.path.join(CUR_PATH, 'OUTPUT', 'Demographics')
if os.access(OUTPUT_DIR, os.F_OK) is False:
os.makedirs(OUTPUT_DIR)
'''
------------------------------------------------------------------------
Define functions
------------------------------------------------------------------------
'''
def get_true_demog_data(min_age, max_age):
'''
Return the true demographic data for a country.
Args:
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
Returns:
fert_data (Numpy array): fertility rates for each model period of life
mort_data (Numpy array): mortality rates for each model period of life
imm_data (Numpy array): immigration rates for each model period of life
pop_data (Numpy array): population for each model period of life
'''
# Filepaths
fert_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'fert.p')
mort_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'mort.p')
pop_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'pop.p')
imm_filepath = os.path.join(CUR_PATH, 'data', 'demographic', 'clean', 'imm.p')
# Load data
fert_data = pickle.load(open(fert_filepath, 'rb'))
mort_data = pickle.load(open(mort_filepath, 'rb'))
pop_data = pickle.load(open(pop_filepath, 'rb'))
imm_data = pickle.load(open(imm_filepath, 'rb'))
# Take most recent population
pop_2014 = pop_data[2014][max(min_age, 0): min(max_age + 1, len(pop_data[2014]))]
pop_2015 = pop_data[2015][max(min_age, 0): min(max_age + 1, len(pop_data[2015]))]
# Take immigration as average over last 3 years
    drop_immyears = sorted(imm_data.columns)[:-3]
imm_data = imm_data.drop(drop_immyears, axis=1)
imm_data = imm_data.mean(axis=1)
return pop_2014, pop_2015, fert_data[2014], mort_data[2014], imm_data
def select_fert_data(fert, set_zeroes=False):
new_fert = fert[fert['AgeDef'] == 'ARDY']
new_fert = new_fert[new_fert['Collection'] == 'HFD']
new_fert = new_fert[(new_fert['RefCode'] == 'JPN_11')]
new_fert.drop(['AgeDef', 'Collection', 'RefCode'], axis=1, inplace=True)
new_fert.columns = ['Year', 'Age', 'Values']
if set_zeroes:
new_fert['Values'][new_fert['Age'] == 14] = 0
new_fert['Values'][new_fert['Age'] == 15] = 0
new_fert['Values'][new_fert['Age'] == 49] = 0
new_fert['Values'][new_fert['Age'] == 50] = 0
return new_fert.astype(float)
# a = get_fert(100, 0, 99, 'jpn', 'dynamic_partial')
def get_fert(totpers, min_age, max_age, graph=False, demog_files=[False, False, False]):
'''
Generate a vector of fertility rates by model period
age that corresponds to the fertility rate data by age in years.
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
graph (bool): =True if want graphical output
demog_files (Pandas dataframe): alternate demographic forecasts
Returns:
fert_rates (Numpy array): fertility rates for each model period of life
'''
fert_all, mort_all, imm_all = demog_files
# Get data
curr_pop, _, curr_fert, _, _ = get_true_demog_data(min_age, max_age)
# Birth ages
birth_ages = np.arange(14, 51)
# Population Distribution
curr_pop_pct = curr_pop / curr_pop.sum()
if (min_age == 1) and (max_age == 100) and (totpers == 100) and (not graph):
fert_rates = np.zeros(totpers)
# Births from 14-50, but age start at 0
fert_rates[15:52] = curr_fert
return fert_rates
### VARIABLE PREPARATION
num_bins = max_age - min_age + 1
binsize = num_bins / totpers
num_sub_bins = float(10000)
len_subbins = (np.float64(num_bins * num_sub_bins)) / totpers
### POPULATION CREATION
ages = np.linspace(max(min_age, 0), min(max_age, 99), curr_pop_pct.shape[0])
pop_func = si.splrep(ages, curr_pop_pct)
new_bins = np.linspace(max(min_age, 0), min(max_age, 99), int(num_sub_bins * (num_bins - 1)), dtype=float)
curr_pop_sub = si.splev(new_bins, pop_func)
curr_pop_sub = curr_pop_sub / curr_pop_sub.sum()
#### AGE BIN CREATION
# Calculate implied fertility rates in sub-bins of curr_fert
fert_func = si.splrep(birth_ages, curr_fert)
fert_rates_sub = np.zeros(curr_pop_sub.shape)
age_sub = (np.linspace(np.float64(binsize) / num_sub_bins + np.float64(min_age), np.float64(max_age), int(num_sub_bins * (num_bins - 1))) - 0.5 * np.float64(binsize) / num_sub_bins)
# Fill in fertility rates
pred_ind = (age_sub >= birth_ages[0]) * (age_sub <= birth_ages[-1]) # Makes sure it is inside valid range
age_pred = age_sub[pred_ind] # Gets age_sub in the valid range by applying pred_ind
fert_rates_sub[pred_ind] = np.float64(si.splev(age_pred, fert_func))
fert_rates_sub[fert_rates_sub < 0] = 0
fert_rates = np.zeros(totpers)
for i in range(totpers):
beg_sub_bin = int(np.rint(i * len_subbins))
end_sub_bin = int(np.rint((i + 1) * len_subbins))
if i == totpers - 1:
end_sub_bin += 1
fert_rates[i] = ((
curr_pop_sub[beg_sub_bin:end_sub_bin] *
fert_rates_sub[beg_sub_bin:end_sub_bin]).sum() /
curr_pop_sub[beg_sub_bin:end_sub_bin].sum())
fert_rates = np.nan_to_num(fert_rates)
if graph:
pp.plot_fert_rates(fert_func, birth_ages, totpers, min_age, max_age, curr_fert, fert_rates, output_dir=OUTPUT_DIR)
if (min_age == 1) and (max_age == 100) and (totpers == 100):
fert_rates = np.zeros(totpers)
# Births from 14-50, but age start at 0
fert_rates[15:52] = curr_fert
return fert_rates
def get_mort(totpers, min_age, max_age, graph=False, mort_file=False):
'''
This function generates a vector of mortality rates by model period
age.
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_age (int): age in years at which agents are born, >= 0
max_age (int): age in years at which agents die with certainty,
>= 4
graph (bool): =True if want graphical output
demog_files (Pandas dataframe): alternate demographic forecasts
Returns:
mort_rates (Numpy array): mortality rates that correspond to each
period of life
infmort_rate (scalar): infant mortality rate
'''
# Get data
_, _, _, curr_mort, _ = get_true_demog_data(min_age, max_age)
# Mortality ages
mort_ages = np.linspace(0, 99, 100).astype(int)
# Infant Mortality Rate
infmort_rate = curr_mort[0]
if (min_age == 1) and (max_age == 100) and (totpers == 100) and (not graph):
return curr_mort, 0 # infmort_rate
### VARIABLE PREPARATION
num_bins = max_age - min_age + 1
binsize = num_bins / totpers
num_sub_bins = int(100)
len_subbins = ((np.float64(num_bins * num_sub_bins)) / totpers)
#### AGE BIN CREATION
# Calculate implied mortality rates in sub-bins of curr_mort
mort_func = si.splrep(mort_ages, curr_mort)
mort_sub = (np.linspace(np.float64(binsize) / num_sub_bins + np.float64(min_age), np.float64(max_age), int(num_sub_bins * (num_bins - 1))) - 0.5 * np.float64(binsize) / num_sub_bins) # CORRECT TO NUM_BINS NOT -1
# Fill in mortality rates
mort_rates_sub_orig = 1 - si.splev(mort_sub, mort_func)
mort_rates_sub_orig[mort_rates_sub_orig > 1] = 1
mort_rates_sub_orig[mort_rates_sub_orig < 0] = 0
mort_rates_sub = np.zeros(mort_rates_sub_orig.shape, dtype=float)
for i in range(totpers):
beg_sub_bin = int(np.rint(i * num_sub_bins))
end_sub_bin = int(np.rint((i + 1) * num_sub_bins))
if i == totpers - 1:
end_sub_bin += 1
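        # Rescale sub-bin survival so its product over the bin matches the
        # target annual survival: prod(s_sub ** power) = s_annual, hence
        # power = log(s_annual) / sum(log(s_sub)).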
tot_period_surv = (np.log(mort_rates_sub_orig[beg_sub_bin:end_sub_bin]) ).sum()
end_surv = np.log(1 - curr_mort[min_age:][i])
if tot_period_surv != 0:
power = end_surv / tot_period_surv
else:
power = 0
mort_rates_sub[beg_sub_bin:end_sub_bin] = mort_rates_sub_orig[beg_sub_bin:end_sub_bin] ** power
mort_rates = np.zeros(totpers)
for i in range(totpers):
beg_sub_bin = int(np.rint(i * len_subbins))
end_sub_bin = int(np.rint((i + 1) * len_subbins))
if i == totpers - 1:
end_sub_bin += 1
mort_rates[i] = 1 - mort_rates_sub[beg_sub_bin:end_sub_bin].prod()
mort_rates[-1] = 1 # Mortality rate in last period is set to 1
if graph:
pp.plot_mort_rates_data(totpers, min_age, max_age, mort_ages[max(min_age, 0):min(max_age + 1, 100)],
curr_mort[max(min_age, 0):min(max_age + 1, 100)], infmort_rate,
mort_rates, output_dir=OUTPUT_DIR)
if (min_age == 1) and (max_age == 100) and (totpers == 100):
mort_rates = curr_mort
return mort_rates, 0 # infmort_rate
def pop_rebin(curr_pop_dist, totpers_new):
'''
For cases in which totpers (E+S) is less than the number of periods
in the population distribution data, this function calculates a new
population distribution vector with totpers (E+S) elements.
Args:
curr_pop_dist (Numpy array): population distribution over N
periods
totpers_new (int): number of periods to which we are
transforming the population distribution, >= 3
Returns:
curr_pop_new (Numpy array): new population distribution over
totpers (E+S) periods that approximates curr_pop_dist
'''
# Number of periods in original data
assert totpers_new >= 3
totpers_orig = len(curr_pop_dist)
if int(totpers_new) == totpers_orig:
curr_pop_new = curr_pop_dist
elif int(totpers_new) < totpers_orig:
num_sub_bins = float(10000)
ages = np.linspace(0, totpers_orig - 1, totpers_orig)
pop_func = si.splrep(ages, curr_pop_dist)
new_bins = np.linspace(0, totpers_orig - 1,\
int(num_sub_bins * totpers_orig))
pop_ests = si.splev(new_bins, pop_func)
len_subbins = ((np.float64(totpers_orig * num_sub_bins)) /
totpers_new)
curr_pop_new = np.zeros(totpers_new, dtype=np.float64)
for i in range(totpers_new):
beg_sub_bin = int(np.rint(i * len_subbins))
end_sub_bin = int(np.rint((i + 1) * len_subbins))
curr_pop_new[i] = \
np.average(pop_ests[beg_sub_bin:end_sub_bin])
# Return curr_pop_new to single precision float (float32)
# datatype
curr_pop_new = np.float32(curr_pop_new) * np.sum(curr_pop_dist) / np.sum(curr_pop_new) # Adjust sum
return curr_pop_new
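# Usage sketch (hypothetical arrays): rebin a 100-period distribution onto 80
# model periods while preserving the total population:
#   pop80 = pop_rebin(pop100, 80)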
def predict_population(fert_rate, mort_rate, imm_rate, pop_data):
'''
    Predict population as pop_{s+1,t+1} = pop_{s,t}(1 - mort_{t-1}) + pop_{s+1,t}(imm_{t-1}),
and setting pop_{0,t+1} = pop_t * fert_t
'''
# First, calculate births
if len(fert_rate) == 100:
births = fert_rate * pop_data / 2
else:
# Births from 14-50, but age start at 0
births = fert_rate * pop_data[15:52] / 2
births = np.sum(births)
# Second, calculate survivors
survivors = (1 - mort_rate) * pop_data
survivors = np.roll(survivors, 1)
    # Third, replace the first survivor entry with the newborns
    survivors[0] = births
    # Fourth, calculate immigration
    imm = imm_rate * pop_data
    # Fifth, calculate predicted population
pred_pop = survivors + imm
return pred_pop
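# Usage sketch (hypothetical arrays): advance the population by one year,
#   pop_2015_hat = predict_population(fert_2014, mort_2014, imm_2014, pop_2014)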
def immsolve(imm_rates, *args):
'''
This function generates a vector of errors representing the
difference in two consecutive periods stationary population
distributions. This vector of differences is the zero-function
objective used to solve for the immigration rates vector, similar to
the original immigration rates vector from util.calc_imm_resid(), that
sets the steady-state population distribution by age equal to the
population distribution in period int(1.5*S)
Args:
imm_rates (Numpy array):immigration rates that correspond to
each period of life, length E+S
args (tuple): (fert_rates, mort_rates, infmort_rate, omega_cur,
g_n_SS)
Returns:
omega_errs (Numpy array): difference between omega_new and
omega_cur_pct, length E+S
'''
fert_rates, mort_rates, infmort_rate, omega_cur_lev, g_n_SS = args
omega_cur_pct = omega_cur_lev / omega_cur_lev.sum()
new_pop = predict_population(fert_rates, mort_rates, imm_rates, omega_cur_lev)
omega_new = new_pop / new_pop.sum()
omega_errs = omega_new - omega_cur_pct
return omega_errs
def calc_imm_resid(fert_t_minus_1, mort_t_minus_1, pop_t_minus_1, pop_t):
'''
Calculate immigration rate in year t
as (pop_t - pop_{t-1}(1 - mort_{t-1})) / (pop_{t-1}),
and setting pop_t_0 = pop_{t-1} * fert_{t-1}
'''
# First, calculate births
if len(fert_t_minus_1) == 100:
births = fert_t_minus_1 * pop_t_minus_1 / 2
else:
# Births from 14-50, but age start at 0
births = fert_t_minus_1 * pop_t_minus_1[15:52] / 2
    births = np.sum(births)
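    # --- Assumed continuation: the source file is truncated at this point. ---
    # A sketch that mirrors predict_population() and the docstring formula:
    # survivors age one period, newborns enter at age 0, and immigration is the
    # residual that reconciles predicted and observed pop_t.
    survivors = (1 - mort_t_minus_1) * pop_t_minus_1
    survivors = np.roll(survivors, 1)
    survivors[0] = births
    imm_rates = (pop_t - survivors) / pop_t_minus_1
    return imm_rates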
#!/usr/bin/env python
"""
Tools to work with data from different spectrographs: CARMENES, HARPS, HARPN.
Uses functions from `harpsutils` and `carmenesutils`.
"""
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import pandas as pd
from . import carmenesutils
from . import harpsutils
from . import expresutils
###############################################################################
# Spectrograph data
# -----------------
# Resolution
dicres = {
'CARM_VIS': 94600,
'CARM_NIR': 80400,
'HARPS': 115000,
'HARPN': 115000,
'EXPRES': 150000,
}
# Number of orders
dicnord = {
'CARM_VIS': 61,
'CARM_NIR': 28,
'HARPS': 72,
'HARPN': 69,
'EXPRES': 86,
}
def inst_nord(inst, carmnirsplit=True, notfound=None, verb=True):
"""Get number of orders for instrument `inst`.
Parameters
----------
carmnirsplit : bool (default True)
Multiply CARM_NIR orders by 2. Usually use CARM_NIR orders split by half, so have double of orders.
Returns
-------
nord : int
"""
try:
nord = dicnord[inst]
if inst == 'CARM_NIR' and carmnirsplit:
nord += nord # double
except:
if verb: print('Instrument {} not available, return {}'.format(inst, notfound))
nord = notfound
return nord
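# e.g. inst_nord('CARM_NIR') returns 56: the 28 physical orders are doubled
# because each CARM_NIR order is split in half.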
# Reference order
dicoref = {
'CARM_VIS': 36,
'CARM_NIR': 11, # This is the double already
'HARPS': 55,
'HARPN': 55,
'EXPRES': 60, # ?
}
def inst_oref(inst, carmnirsplit=True, notfound=None, verb=True):
"""Get reference order for instrument `inst`.
Parameters
----------
carmnirsplit : bool (default True)
Multiply CARM_NIR `oref` by 2. Usually use CARM_NIR orders split by half, so have double of orders.
Returns
-------
oref : int
"""
try:
oref = dicoref[inst]
if inst == 'CARM_NIR' and carmnirsplit == False:
oref = int(oref / 2) # half
except:
if verb: print('Instrument {} not available, return {}'.format(inst, notfound))
oref = notfound
return oref
# RV per pixel [km/s]
dicrvpixmed = {
'CARM_VIS': 1.258,
'CARM_NIR': 1.356,
'HARPS': 0.820,
'HARPN': 0.820,
'EXPRES': 0.500,
}
def inst_rvpixmed(inst, notfound=None, verb=True):
"""Get the median delta RV per pixel for instrument `inst`.
Parameters
----------
Returns
-------
    rvpixmed : float
"""
try:
rvpixmed = dicrvpixmed[inst]
except:
if verb: print('Instrument {} not available, return {}'.format(inst, notfound))
rvpixmed = notfound
return rvpixmed
# Spectral sampling s [pix/SE] (SE: spectral element, ~ FWHM)
dictspix = {
'CARM_VIS': 2.5,
'CARM_NIR': 2.8,
'HARPS': 3.2,
'HARPN': 3.2,
'EXPRES': 3.6, # 4.0
}
def inst_tspix(inst, notfound=None, verb=True):
    """Get spectral sampling [pix/SE] for instrument `inst`.
    Parameters
    ----------
    Returns
    -------
    tspix : float
    """
    try:
        tspix = dictspix[inst]
    except:
        if verb: print('Instrument {} not available, return {}'.format(inst, notfound))
        tspix = notfound
    return tspix
###############################################################################
# Reduced spectra
# ---------------
# Read reduced spectrum
def fitsred_read(filin, inst,
# CARMENES
carmnirdiv=True,
# HARPS/N
harpblaze=True, dirblaze=None, filblaze=None,
# EXPRES
expresw='bary_excalibur',
):
"""
Parameters
----------
carmnirdiv : bool, default True
        If True, divide the orders by the discontinuity at the center. If not, leave them as read from the FITS. Only works for `inst='CARM_NIR'`.
harpblaze : bool, default True
        If True, get the blaze function from the corresponding file (see `dirblaze` and `filblaze`). If False, the output corresponding to the blaze, `c`, is an array of ones with the shape of `f`.
dirblaze : str, default None
Directory containing the blaze file. If None (default), assume the blaze file is in the same directory as `filin`.
    filblaze : str, default None
Blaze file name. If None (default), get the file name from the header.
Returns
-------
"""
if inst == 'CARM_VIS':
w, f, sf, c, header = carmenesutils.caracal_fitsred_read(filin)
dataextra = {}
elif inst == 'CARM_NIR':
w, f, sf, c, header = carmenesutils.caracal_fitsred_read(filin)
dataextra = {}
if carmnirdiv:
# w, f, sf, c = carmenesutils.caracal_fitsrednir_divide_ords(w=w, f=f, sf=sf, c=c)
a = carmenesutils.caracal_fitsrednir_divide_ords(w=w, f=f, sf=sf, c=c)
w, f, sf, c = a['w'], a['f'], a['sf'], a['c']
elif inst == 'HARPS' or inst == 'HARPN':
w, f, c, header, _ = harpsutils.drs_e2dsred_read(filin, readblaze=harpblaze, dirblaze=dirblaze, filblaze=filblaze, inst=inst)
sf = np.zeros_like(w)
dataextra = {}
elif inst == 'EXPRES':
w, wcb, we, wecb, mwecb, f, sf, c, b, mf, header, header1, header2 = expresutils.drs_fitsred_read(filin)
if expresw == 'bary_excalibur':
w = wecb
elif expresw == 'excalibur':
w = we
elif expresw == 'bary_wavelength':
w = w
elif expresw == 'wavelength':
w = wcb
dataextra = {'blaze': b, 'pixel_mask': mf, 'excalibur_mask': mwecb, 'header1': header1, 'header2': header2}
return w, f, sf, c, header, dataextra
# -----------------------------------------------------------------------------
# Values from header
# ------------------
# Get BJD from header
def header_bjd_lisobs(lisobs, inst, name='bjd', notfound=np.nan, ext=0):
"""
Get the BJD from the header of the observations in `lisobs`.
Parameters
----------
name : str or None (default 'bjd')
Change the pandas dataframe column name to `name`. If `None`, keep the header keyword as the column name.
"""
if inst == 'CARM_VIS' or inst == 'CARM_NIR':
lisbjd = carmenesutils.caracal_bjd_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
# # Change column names
# if name is not None:
# lisbjd.rename(columns={'HIERARCH CARACAL BJD': name}, inplace=True)
elif inst == 'HARPS' or inst == 'HARPN':
lisbjd = harpsutils.drs_bjd_lisobs(lisobs, inst, notfound=notfound, ext=ext, name=name)
# # Change column names
# if name is not None:
# kwinst = harpsutils.headerkwinst(inst, outfail=np.nan)
# lisbjd.rename(columns={kwinst + 'DRS BJD': name}, inplace=True)
elif inst == 'EXPRES':
lisbjd = expresutils.drs_bjd_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
return lisbjd
# Get readout noise RON from header
def header_ron_lisobs(lisobs, inst, name='ron', notfound=np.nan, ext=0):
"""
Get the RON from the header of the observations in `lisobs`.
Parameters
----------
name : str or None (default 'ron')
        Change the pandas dataframe column name to `name`. If `None`, keep the header keyword as the column name.
"""
if inst == 'CARM_VIS':
lisron = carmenesutils.caracal_ron_lisobs_vis(lisobs, notfound=notfound, ext=ext)
# Change column names
if name is not None:
lisron.rename(columns={'E_READN1': name}, inplace=True)
elif inst == 'CARM_NIR':
lisron = carmenesutils.caracal_ron_lisobs_nir(lisobs, notfound=notfound, ext=ext)
# Change column names
if name is not None:
lisron.rename(columns={'E_READN': name}, inplace=True)
elif inst == 'HARPS' or inst == 'HARPN':
lisron = harpsutils.drs_ron_lisobs(lisobs, inst, notfound=notfound, ext=ext)
# Change column names
if name is not None:
kwinst = harpsutils.headerkwinst(inst, outfail=np.nan)
lisron.rename(columns={kwinst + 'DRS CCD SIGDET': name}, inplace=True)
elif inst == 'EXPRES':
# TODO: set to 0 for now
lisron = pd.DataFrame(np.zeros_like(lisobs, dtype=float), columns=[name], index=lisobs)
return lisron
# Get exposure time from header
def header_exptime_lisobs(lisobs, inst, name='exptime', notfound=np.nan, ext=0):
if inst == 'CARM_VIS' or inst == 'CARM_NIR':
lisexptime = carmenesutils.caracal_exptime_lisobs(lisobs, notfound=notfound, ext=ext)
# Change column names
if name is not None:
lisexptime.rename(columns={'EXPTIME': name}, inplace=True)
elif inst == 'HARPS' or inst == 'HARPN':
lisexptime = harpsutils.drs_exptime_lisobs(lisobs, notfound=notfound, ext=ext)
# Change column names
if name is not None:
lisexptime.rename(columns={'EXPTIME': name}, inplace=True)
if inst == 'EXPRES':
lisexptime = expresutils.drs_exptime_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
return lisexptime
# Get airmass time from header
def header_airmass_lisobs(lisobs, inst, name='airmass', notfound=np.nan, ext=0):
if inst == 'CARM_VIS' or inst == 'CARM_NIR':
lisairmass = carmenesutils.caracal_airmass_lisobs(lisobs, notfound=notfound, ext=ext)
# Change column names
if name is not None:
lisairmass.rename(columns={'AIRMASS': name}, inplace=True)
elif inst == 'HARPS' or inst == 'HARPN':
lisairmass = harpsutils.drs_airmass_lisobs(lisobs, notfound=notfound, ext=ext)
# Change column names
if name is not None:
lisairmass.rename(columns={'AIRMASS': name}, inplace=True)
elif inst == 'EXPRES':
lisairmass = expresutils.drs_airmass_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
return lisairmass
# Get SNR from header
def header_snr_lisobs(lisobs, inst, name='snro', ords=None, notfound=np.nan, ext=0):
"""
Get the SNR from the header of the orders `ords` for the observations in `lisobs`.
Parameters
----------
name : {'ord', 'snro'} or None
Change to pandas dataframe column name. If `ord`, change to the order number (an int, e.g. 36). If `snro`, change to e.g. `snro36`. If None, keep the header keyword as the column name.
"""
if inst == 'CARM_VIS' or inst == 'CARM_NIR':
lissnr = carmenesutils.caracal_snr_lisobs(lisobs, ords=ords, notfound=notfound, ext=ext)
# Change column names
if name is not None:
if name == 'ord':
changecol = {i: int(i.replace('HIERARCH CARACAL FOX SNR ', '')) for i in lissnr.columns}
elif name == 'snro':
changecol = {i: i.replace('HIERARCH CARACAL FOX SNR ', 'snro') for i in lissnr.columns}
lissnr.rename(columns=changecol, inplace=True)
elif inst == 'HARPS' or inst == 'HARPN':
lissnr = harpsutils.drs_snr_lisobs(lisobs, ords=ords, notfound=notfound, ext=ext)
# Change column names
if name is not None:
kwinst = harpsutils.headerkwinst(inst, outfail=np.nan)
if name == 'ord':
changecol = {i: int(i.replace('{}DRS SPE EXT SN'.format(kwinst), '')) for i in lissnr.columns}
elif name == 'snro':
changecol = {i: i.replace('{}DRS SPE EXT SN'.format(kwinst), 'snro') for i in lissnr.columns}
lissnr.rename(columns=changecol, inplace=True)
elif inst == 'EXPRES':
# EXPRES S/N not in FITS header, get from spectrum
lissnr = expresutils.drs_snr_lisobs(lisobs, ords, name=name)
return lissnr
# Get RV corrections from header
def header_rvcorrection_lisobs(lisobs, inst, name='shift', notfound=np.nan, ext=0):
"""
Get RV correction from header: BERV and drift.
No secular acceleration or nightly drifts.
name : str
Change original header keyword to `name`. Not implemented in `carmenesutils.caracal_rvcorrection_lisobs` yet
"""
if inst == 'CARM_VIS' or inst == 'CARM_NIR':
shift, shifterr, datashift = carmenesutils.caracal_rvcorrection_lisobs(lisobs, use_berv=True, use_drift=True, notfound=notfound)
elif inst == 'HARPS' or inst == 'HARPN':
shift, shifterr, datashift = harpsutils.drs_rvcorrection_lisobs(lisobs, inst, name=name, notfound=notfound, ext=ext)
return shift, shifterr, datashift
# Get RV corrections from SERVAL or if not, from header
def serval_header_rvcorrection_lisobs(lisobs, inst, source='header', servalin=None, obj=None, notfound=np.nan, ext=0, join='outer'):
"""
Parameters
----------
source : {'header', 'serval', 'serval_header', 'none'} or filename
Source from which to get the RV correction.
- `header`: get it from FITS header. Can only get BERV and drift.
- `serval`: get it from SERVAl outputs. Get BERV, drift and sa.
- `serval_header`: try to get it from SERVAL, if not possible or nan, get it from header.
If `serval` or `serval_header`, must provide `servalin`.
- `none`: rv corrections are 0.
- filename containing the RV corrections.
Columns option a): 0) observation name, rest of columns: corrections with header, to be loaded with pandas dataframe. Header names have to be: 'berv', 'sa', 'drift', 'otherdrift' and optionally the errors. Not all columns are necessary.
Columns option b): 0) observation name, 1) rv shift (which is considered as 'other drift') 2) rv shift error (optional).
The columns not present will be 0.
servalin : str or pd.DataFrame
Path to directory containing SERVAL data or pandas dataframe with the necessary data (berv, drift and sa) already loaded.
obj : str
Tag of the SERVAL outputs, e.g. `obj.rvc.dat`. If None (default), try to get it from the files in `servalin` directly.
"""
# Get rv corrections from FITS header
if source == 'header':
shift, shifterr, datashift = header_rvcorrection_lisobs(lisobs, inst, notfound=notfound, ext=ext)
# Get rv corrections SERVAL outputs
elif source == 'serval':
# This function should also work for HARPS
print('servalin', servalin)
shift, shifterr, datashift = carmenesutils.serval_rvcorrection_lisobs(servalin, obj=obj, lisfilobs=lisobs, servalnames=False, use_berv=True, use_drift=True, use_sa=True, join=join)
# Change index
datashift['filobs'] = lisobs
datashift.set_index('filobs', inplace=True)
# Get rv corrections SERVAL outputs, and if not, from FITS header
elif source == 'serval_header':
shift, shifterr, datashift = carmenesutils.serval_caracal_rvcorrection_lisobs(servalin, obj=obj, use_caracal=True, lisfilobs=lisobs, use_berv=True, use_drift=True, use_sa=True, notfound=notfound, ext=ext, verb=True)
# Change index
datashift['filobs'] = lisobs
datashift.set_index('filobs', inplace=True)
# RV corrections = 0
elif source == 'none':
cols = ['berv', 'berverr', 'drift', 'drifterr', 'sa', 'saerr', 'shift', 'shifterr']
a = np.zeros((len(lisobs), len(cols)))
datashift = pd.DataFrame(a, columns=cols, index=lisobs)
shift, shifterr = datashift['shift'], datashift['shifterr']
# Get rv corrections from file
elif os.path.exists(source):
sys.exit('Not implemented yet!')
else:
sys.exit('Source {} not correct'.format(source))
# Add missing columns as nan
cols = ['berv', 'berverr', 'drift', 'drifterr', 'sa', 'saerr', 'otherdrift', 'otherdrifterr']
for c in cols:
if c not in datashift.columns:
            datashift[c] = np.ones_like(shift, dtype=float) * np.nan
# -*- coding: utf-8 -*-
"""
Created on Tue May 17 20:01:37 2016
@author: felipe
"""
import numpy as np
import scipy.linalg as sp
from scipy.stats import mvn
from scipy.stats import multivariate_normal
def func(U, epsilon, distance):
    # Per-sample indicator that any U_i falls below epsilon - distance,
    # i.e. that the separation drops below the collision distance
ind = np.any(U < epsilon - distance, axis = 1)
return ind
distance = 10.0  # separation between aircraft (nmi)
print("Distance between aircraft")
print(distance)
epsilon = 0.1 # collision distance
Nsim = 10**5 # number of Monte Carlo simulations
npoint = 20 # number of points in the trajectory
Time = 20.0
v=500.0/60.0 # airplane speed
rc=1.0/57 # param
sigmac=1.0 # param
t = np.linspace(0.1, Time, npoint);
mean = np.zeros((npoint,), dtype = float)
cov = np.zeros((npoint,npoint), dtype = float)
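# Exponential (Ornstein-Uhlenbeck-style) covariance kernel for the cross-track
# deviation, written in flown distance v*t -- this is our reading of the model.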
for i in range(npoint):
for j in range(npoint):
cov[i,j] = 2 * sigmac**2 * (1-np.exp(-2*rc*v*min(t[i],t[j])/sigmac)) * np.exp(-rc*v*np.abs(t[i]-t[j])/sigmac)
# Simulate the Gaussian vectors
U = np.random.multivariate_normal(mean, cov, size=Nsim)
# Monte Carlo method to calculate the probability
ind_mc = func(U, epsilon, distance)
p_emp_MC = np.mean(ind_mc)
erreur_MC = 1.96*np.sqrt(p_emp_MC*(1-p_emp_MC)/Nsim)
print("MC estimation")
print(p_emp_MC)
print("MC error")
print(erreur_MC)
print("MC intervalle de confiance")
print([p_emp_MC - erreur_MC, p_emp_MC + erreur_MC])
##Importance sampling
C = sp.sqrtm(cov)
G = multivariate_normal.rvs(np.zeros(npoint), np.eye(npoint), size = Nsim)
X = []
Y = []
Si = np.linspace(-0, -distance, 20)
## Look for the best shift ("décentrage") in terms of error
for dec in Si:
    # dec = -4  # leftover debug override that froze the loop variable and defeated the scan over Si
#a = dec * np.linspace(0,1,npoint/2)
#b = dec * np.linspace(1,0,npoint/2)
delta = dec * np.linspace(0,1,npoint)
#delta = np.concatenate((a,b))
    L = -np.dot(G, delta) - np.dot(delta.T, delta)/2 # log-likelihood ratio (importance-sampling weight)
    ech_IS = func(np.dot(G + delta, C), epsilon, distance) * np.exp(L)
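    # --- Assumed continuation: the source file is truncated here. ---
    # Sketch: accumulate the IS estimate and its 95% half-width for each shift,
    # mirroring the plain Monte Carlo block above.
    p_emp_IS = np.mean(ech_IS)
    erreur_IS = 1.96*np.sqrt(np.var(ech_IS, ddof=1)/Nsim)
    X.append(p_emp_IS)
    Y.append(erreur_IS)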
import warnings
import numpy as np
import palpy
from rubin_sim.utils import Site, m5_flat_sed
from .baseStacker import BaseStacker
__all__ = ['NormAirmassStacker', 'ParallaxFactorStacker', 'HourAngleStacker',
'FilterColorStacker', 'ZenithDistStacker', 'ParallacticAngleStacker',
'DcrStacker', 'FiveSigmaStacker', 'SaturationStacker']
class SaturationStacker(BaseStacker):
"""Calculate the saturation limit of a point source. Assumes Guassian PSF.
Parameters
----------
pixscale : float, optional (0.2)
Arcsec per pixel
    gain : float, optional (1.0)
        electrons per adu
saturation_e : float, optional (150e3)
The saturation level in electrons
zeropoints : dict-like, optional (None)
The zeropoints for the telescope. Keys should be str with filter names, values in mags.
If None, will use Rubin-like zeropoints.
km : dict-like, optional (None)
Atmospheric extinction values. Keys should be str with filter names.
If None, will use Rubin-like zeropoints.
"""
colsAdded = ['saturation_mag']
def __init__(self, seeingCol='seeingFwhmEff', skybrightnessCol='skyBrightness',
exptimeCol='visitExposureTime', nexpCol='numExposures',
filterCol='filter', airmassCol='airmass',
saturation_e=150e3, zeropoints=None, km=None, pixscale=0.2, gain=1.0):
self.units = ['mag']
self.colsReq = [seeingCol, skybrightnessCol, exptimeCol, nexpCol, filterCol, airmassCol]
self.seeingCol = seeingCol
self.skybrightnessCol = skybrightnessCol
self.exptimeCol = exptimeCol
self.nexpCol = nexpCol
self.filterCol = filterCol
self.airmassCol = airmassCol
self.saturation_adu = saturation_e/gain
        self.pixscale = pixscale
names = ['u', 'g', 'r', 'i', 'z', 'y']
types = [float]*6
if zeropoints is None:
# Note these zeropoints are calculating the number of *electrons* per second (thus gain=1)
# https://github.com/lsst-pst/syseng_throughputs/blob/master/notebooks/Syseng%20Throughputs%20Repo%20Demo.ipynb
self.zeropoints = np.array([27.03, 28.38, 28.15, 27.86, 27.46, 26.68]).view(list(zip(names, types)))
self.saturation_adu = saturation_e
else:
self.zeropoints = zeropoints
if km is None:
# Also from notebook above
self.km = np.array([0.491, 0.213, 0.126, 0.096, 0.069, 0.170]).view(list(zip(names, types)))
else:
self.km = km
def _run(self, simData, cols_present=False):
for filtername in np.unique(simData[self.filterCol]):
in_filt = np.where(simData[self.filterCol] == filtername)[0]
# Calculate the length of the on-sky time per EXPOSURE
exptime = simData[self.exptimeCol][in_filt] / simData[self.nexpCol][in_filt]
# Calculate sky counts per pixel per second from skybrightness + zeropoint (e/1s)
sky_counts = 10.**(0.4*(self.zeropoints[filtername]
- simData[self.skybrightnessCol][in_filt])) * self.pixscale**2
# Total sky counts in each exposure
sky_counts = sky_counts * exptime
# The counts available to the source (at peak) in each exposure is the
# difference between saturation and sky
remaining_counts_peak = (self.saturation_adu - sky_counts)
# Now to figure out how many counts there would be total, if there are that many in the peak
sigma = simData[self.seeingCol][in_filt]/2.354
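            # A 2-D Gaussian integrates to peak * 2*pi*sigma_pix^2, so the next
            # line converts the available peak counts into total source counts.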
source_counts = remaining_counts_peak * 2.*np.pi*(sigma/self.pixscale)**2
# source counts = counts per exposure (expTimeCol / nexp)
# Translate to counts per second, to apply zeropoint
count_rate = source_counts / exptime
simData['saturation_mag'][in_filt] = -2.5*np.log10(count_rate) + self.zeropoints[filtername]
# Airmass correction
simData['saturation_mag'][in_filt] -= self.km[filtername]*(simData[self.airmassCol][in_filt] - 1.)
return simData
class FiveSigmaStacker(BaseStacker):
"""
Calculate the 5-sigma limiting depth for a point source in the given conditions.
This is generally not needed, unless the m5 parameters have been updated
or m5 was not previously calculated.
"""
colsAdded = ['m5_simsUtils']
def __init__(self, airmassCol='airmass', seeingCol='seeingFwhmEff', skybrightnessCol='skyBrightness',
filterCol='filter', exptimeCol='visitExposureTime'):
self.units = ['mag']
self.colsReq = [airmassCol, seeingCol, skybrightnessCol, filterCol, exptimeCol]
self.airmassCol = airmassCol
self.seeingCol = seeingCol
self.skybrightnessCol = skybrightnessCol
self.filterCol = filterCol
self.exptimeCol = exptimeCol
def _run(self, simData, cols_present=False):
if cols_present:
# Column already present in data; assume it needs updating and recalculate.
return simData
filts = np.unique(simData[self.filterCol])
for filtername in filts:
infilt = np.where(simData[self.filterCol] == filtername)
simData['m5_simsUtils'][infilt] = m5_flat_sed(filtername,
simData[infilt][self.skybrightnessCol],
simData[infilt][self.seeingCol],
simData[infilt][self.exptimeCol],
simData[infilt][self.airmassCol])
return simData
class NormAirmassStacker(BaseStacker):
"""Calculate the normalized airmass for each opsim pointing.
"""
colsAdded = ['normairmass']
def __init__(self, airmassCol='airmass', decCol='fieldDec',
degrees=True, telescope_lat = -30.2446388):
self.units = ['X / Xmin']
self.colsReq = [airmassCol, decCol]
self.airmassCol = airmassCol
self.decCol = decCol
self.telescope_lat = telescope_lat
self.degrees = degrees
def _run(self, simData, cols_present=False):
"""Calculate new column for normalized airmass."""
# Run method is required to calculate column.
# Driver runs getColInfo to know what columns are needed from db & which are calculated,
# then gets data from db and then calculates additional columns (via run methods here).
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
dec = simData[self.decCol]
if self.degrees:
dec = np.radians(dec)
min_z_possible = np.abs(dec - np.radians(self.telescope_lat))
min_airmass_possible = 1./np.cos(min_z_possible)
simData['normairmass'] = simData[self.airmassCol] / min_airmass_possible
return simData
class ZenithDistStacker(BaseStacker):
"""Calculate the zenith distance for each pointing.
If 'degrees' is True, then assumes altCol is in degrees and returns degrees.
If 'degrees' is False, assumes altCol is in radians and returns radians.
"""
colsAdded = ['zenithDistance']
def __init__(self, altCol='altitude', degrees=True):
self.altCol = altCol
self.degrees = degrees
if self.degrees:
self.units = ['degrees']
else:
self.unit = ['radians']
self.colsReq = [self.altCol]
def _run(self, simData, cols_present=False):
"""Calculate new column for zenith distance."""
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
if self.degrees:
simData['zenithDistance'] = 90.0 - simData[self.altCol]
else:
simData['zenithDistance'] = np.pi/2.0 - simData[self.altCol]
return simData
class ParallaxFactorStacker(BaseStacker):
"""Calculate the parallax factors for each opsim pointing. Output parallax factor in arcseconds.
"""
colsAdded = ['ra_pi_amp', 'dec_pi_amp']
def __init__(self, raCol='fieldRA', decCol='fieldDec', dateCol='observationStartMJD', degrees=True):
self.raCol = raCol
self.decCol = decCol
self.dateCol = dateCol
self.units = ['arcsec', 'arcsec']
self.colsReq = [raCol, decCol, dateCol]
self.degrees = degrees
def _gnomonic_project_toxy(self, RA1, Dec1, RAcen, Deccen):
"""Calculate x/y projection of RA1/Dec1 in system with center at RAcen, Deccenp.
Input radians.
"""
# also used in Global Telescope Network website
cosc = np.sin(Deccen) * np.sin(Dec1) + np.cos(Deccen) * np.cos(Dec1) * np.cos(RA1-RAcen)
x = np.cos(Dec1) * np.sin(RA1-RAcen) / cosc
y = (np.cos(Deccen)*np.sin(Dec1) - np.sin(Deccen)*np.cos(Dec1)*np.cos(RA1-RAcen)) / cosc
return x, y
def _run(self, simData, cols_present=False):
if cols_present:
# Column already present in data; assume it is correct and does not need recalculating.
return simData
ra_pi_amp = np.zeros(np.size(simData), dtype=[('ra_pi_amp', 'float')])
dec_pi_amp = np.zeros(np.size(simData), dtype=[('dec_pi_amp', 'float')])
ra_geo1 = np.zeros(np.size(simData), dtype='float')
dec_geo1 = np.zeros(np.size(simData), dtype='float')
ra_geo = np.zeros(np.size(simData), dtype='float')
        dec_geo = np.zeros(np.size(simData), dtype='float')
# @author Metro
# @time 2021/11/11
# happy to be single!
# Extra parameters are not considered for now
# Implement our own observation space first; richer interfaces can be fleshed out later
import gym
import os
import numpy as np
import sys
import random
import copy
import traci
import traci.constants as tc
from gym import spaces
from bisect import bisect_left
class FreewheelingIntersectionEnv_v1(gym.Env):
"""
Description:
A traffic signal control simulator environment for an isolated intersection.
        We suppose there is no concept of a cycle in the signal control; hence one specific phase may be
        executed repeatedly before the others.
        When one particular phase is over, it is time to decide (choose an action) which phase (DISCRETE) to
        execute and for what duration (int, CONTINUOUS).
        This is actually an RL problem with a hybrid action space; if you just want to train and evaluate with
        a plain env, add the corresponding constraints in the env or in train.py.
Observation:
Type: Box(512)
        # 512 = 32 * 8 * 2
        # 32 cells per phase, 8 phases, 2 items per cell: speed and position.
# When vehicles are absent in one specific cell, pad it with 0. and 0. w.r.t position and speed.
Num Observation Min Max
0 Phase_0 position 0. 1.
...
7 Phase_7 position 0. 1.
Actions:
Type: Discrete(8)
Num Action
0 NS_straight
1 EW_straight
2 NS_left
3 EW_left
4 N_straight_left
5 E_straight_left
6 S_straight_left
7 W_straight_left
-------------- PLUS ----------
Type: Box(1)
Num Action Min Max
0 The duration of phase you have selected 10 30
Reward:
A combination between vehicle's loss time and queue in one specific phase.
Starting State:
Initialization according to sumo, actually there is no vehicles at the beginning
Episode Termination:
Episode length is greater than SIMULATION_STEPS(3600 in default, for one hour).
"""
def __init__(self):
self.phase_num = 8
self.cells = 32
# the edgeID is defined in FW_Inter.edg.xml
# as you may have different definition in your own .edg.xml, change it in config.
self.edgeIDs = ['north_in', 'east_in', 'south_in', 'west_in']
# vehicle_types will help to filter the vehicles on the same edge but have different direction.
self.vehicle_types = ['NS_through', 'NE_left',
'EW_through', 'ES_left',
'SN_through', 'SW_left',
'WE_through', 'WN_left']
self.phase_transformer = np.array([
[None, 8, 8, 8, 16, 8, 17, 8],
[9, None, 9, 9, 9, 18, 9, 19],
[10, 10, None, 10, 20, 10, 21, 10],
[11, 11, 11, None, 11, 22, 11, 23],
[24, 12, 25, 12, None, 12, 12, 12],
[13, 26, 13, 27, 13, None, 13, 13],
[28, 14, 29, 14, 14, 14, None, 14],
[15, 30, 15, 31, 15, 15, 15, None]
])
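        # Our reading (assumption): phase_transformer[i][j] gives the index of
        # the intermediate (yellow/transition) phase used when switching from
        # phase i to phase j; the diagonal is None since no switch occurs.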
self.lane_length = 240.
self.action_pre = None
self.vehicle_IDs_present = None
self.yellow = 3
self.max_queuing_speed = 1.
self.simulation_steps = 1800
# when step() we will save last 'self.N_STEPS' states for state representation
self.episode_steps = 0
        self.action_space = spaces.Tuple((
            spaces.Discrete(self.phase_num),
            spaces.Box(low=np.array([10.]), high=np.array([30.]), dtype=np.float32)  # duration bounds per the docstring (10-30)
        ))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: segmentation.py
# Author: <NAME> <<EMAIL>>
import numpy as np
from math import ceil
import cv2,colorsys
import pydensecrf.densecrf as dcrf
import os, sys
from tensorpack.utils import logger
from tensorpack.utils.palette import PALETTE_RGB
__all__ = ['update_confusion_matrix', 'predict_slider']
# Colour map. #BGR order.
label_colours = [(35,142,107),(70,70,70),(128,64,128),(142,0,0),(0,0,0)
# 0=vegetation, 1=building, 2=road, 3=vehicle, 4=other
,(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0)
,(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0)
,(0,0,0),(0,0,0),(0,0,0),(0,0,0),(0,0,0)]
# coco
# label_colours = [(0,0,0)
# 0=background
# ,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128)
# # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
# ,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0)
# # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
# ,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128)
# # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person
# ,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)]
# # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
id_to_name = {
0:"background",
1:"aeroplane",
2:"bicycle",
3:"bird",
4:"boat",
5:"bottle",
6:"bus",
7:"car",
8:"cat",
9:"chair",
10:"cow",
11:"diningtable",
12:"dog",
13:"horse",
14:"motorbike",
15:"person",
16:"plant",
17:"sheep",
18:"sofa",
19:"train",
20:"tv/monitor"
}
ignore_color = (255,255,255)
fuzzy_color = (64,0,128)
label2_colours = [(192,128,0),(64,0,128)]
def update_confusion_matrix(pred, label, conf_m, nb_classes, ignore = 255):
ignore_index = label != ignore
seg_gt = label[ignore_index].astype('int32')
seg_pred = pred[ignore_index].astype('int32')
index = (seg_gt * nb_classes + seg_pred).astype('int32')
label_count = np.bincount(index)
for i_label in range(nb_classes):
for i_pred_label in range(nb_classes):
cur_index = i_label * nb_classes + i_pred_label
if cur_index < len(label_count):
conf_m[i_label, i_pred_label] += label_count[cur_index] #notice here, first dimension is label,second dimension is prediction.
return conf_m
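# Typical usage (hypothetical `val_pairs` iterable of (pred, label) arrays):
#   conf_m = np.zeros((nb_classes, nb_classes), dtype='float')
#   for pred, label in val_pairs:
#       conf_m = update_confusion_matrix(pred, label, conf_m, nb_classes)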
def imwrite_grid(image,label,prediction,uncertainty,border,prefix_dir, imageId):
h,w,_ = image.shape
grid_num = h/border
for i in range(grid_num):
for j in range(grid_num):
start_i = border*i
start_j = border*j
end_i = border*(i+1)
end_j = border*(j+1)
cv2.imwrite(os.path.join(prefix_dir,"out{}_patch{}_{}.png".format(imageId,i,j)),
np.concatenate((image[start_i:end_i,start_j:end_j],
visualize_label(label[start_i:end_i,start_j:end_j]),
visualize_label(prediction[start_i:end_i,start_j:end_j]),uncertainty[start_i:end_i,start_j:end_j]), axis=1))
def pad_image(img, target_size):
"""Pad an image up to the target size."""
rows_missing = max(target_size[0] - img.shape[0], 0)
cols_missing = max(target_size[1] - img.shape[1], 0)
    padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
return padded_img, [0,target_size[0]-rows_missing,0,target_size[1] - cols_missing]
def pad_edge(img, target_size):
"""Pad an image up to the target size."""
rows_missing = max(target_size[0] - img.shape[0], 0)
cols_missing = max(target_size[1] - img.shape[1], 0)
padded_img = np.pad(img, ((0, rows_missing), (0, cols_missing), (0, 0)), 'constant')
return padded_img, [0,target_size[0]-rows_missing,0,target_size[1] - cols_missing]
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
#https://github.com/matterport/Mask_RCNN/blob/master/visualize.py
def visualize_binary_mask(image, label,color, class_num, alpha=0.5):
"""Color classes a good distance away from each other."""
image = np.copy(image)
for ii in range(1,class_num):# background no mask
for c in range(3):
image[:, :, c] = np.where(label == ii,
image[:, :, c] *
(1 - alpha) + alpha * color[c],
image[:, :, c])
return image
def crop_saliency(img, label):
img_copy = np.copy(img)
if len(label.shape) == 2:
label = label[:,:,np.newaxis]*np.ones((1,1,3))
img_copy[label==0] = 255 #white
return img_copy
def visualize_label(label, class_num=21, ignore_label = 255):
"""Color classes a good distance away from each other."""
if len(label.shape) == 3:
label = np.squeeze(label)
h, w = label.shape
img_color = np.zeros((h, w, 3)).astype('uint8')
if class_num == 2:#if two class, using white-black colormap to enlarge contrast
my_label_colours = [(255, 255, 255),(0, 0, 0)]
else:
if class_num > 21:
my_label_colours = [PALETTE_RGB[i][::-1] for i in range(class_num)]
else:
my_label_colours = label_colours
for i in range(0,class_num):
img_color[label == i] = my_label_colours[i]
img_color[label==ignore_label] = ignore_color#highlight ignore label
return img_color
def visualize_uncertainty(prob):
prob = np.amax(prob,axis=2,keepdims=False)*255
return prob
def visualize_strict_uncertainty(prob,label):
h,w,c = prob.shape
gt = np.reshape(label,(h*w))
prob = np.reshape(prob,(h*w,c))
gt_idx = np.where(gt > -1)[0]
idx = np.vstack((gt_idx, gt))
    tmp = prob[list(idx)]  # TODO: advanced indexing is buggy here; the ignore label 255 gives index 255, out of bounds for axis 1 with size 21
tmp = tmp*255
    tmp = np.reshape(tmp, (w, h))
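    # Assumed: the source file is truncated here; returning the reshaped map
    # mirrors visualize_uncertainty() above.
    return tmp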
import pandas as pd
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import finterstellar as fs
pd.plotting.deregister_matplotlib_converters()
font = 'NanumSquareRound, AppleGothic, Malgun Gothic, DejaVu Sans'
class Visualize:
today = '(' + pd.to_datetime('today').date().strftime("%y%m%d") + ') '
today_str = pd.to_datetime('today').date().strftime("%Y%m%d")
def __init__(self):
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.grid'] = True
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['grid.linestyle'] = '--'
plt.rcParams['grid.alpha'] = 0.7
plt.rcParams['lines.antialiased'] = True
plt.rcParams['figure.figsize'] = [15.0, 7.0]
plt.rcParams['savefig.dpi'] = 96
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['figure.titlesize'] = 'medium'
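    # Minimal usage sketch (hypothetical DataFrame `df` with a DatetimeIndex
    # and a 'SPY' price column):
    #   v = Visualize()
    #   v.price_view(df, '2020-01-02', 'SPY')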
def price_view(self, df, b_date, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
for c in cds:
plt.plot(x, df.loc[b_date:, c], label=c)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' price_view.png', bbox_inches='tight')
def index_view(self, df, b_date, cd, size=(15,7), make_file=False):
if isinstance(df.index[0], dt.date):
b_date = fs.check_base_date(df, b_date)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
cds = fs.str_list(cd)
for c in cds:
plt.plot(x, df.loc[b_date:, c] / df.loc[b_date, c] * 100, label=c)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' index_view.png', bbox_inches='tight')
def complex_view(self, df, b_date, cd_a, cd_b, size=(15,7), make_file=False):
cds_a = fs.str_list(cd_a)
cds_b = fs.str_list(cd_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.fill_between(x, df.loc[b_date:, c], 0, facecolor='C'+str(i), alpha=0.3)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+self.today+cds_a[0]+' complex_view.png', bbox_inches='tight')
def multi_line_view(self, df, b_date, cd_a, cd_b, size=(15,7), make_file=False):
cds_a = fs.str_list(cd_a)
cds_b = fs.str_list(cd_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c, alpha=0.7)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+self.today+cds_a[0]+' multi_line_view.png', bbox_inches='tight')
def position_view(self, df, cd, size=(15,1), make_file=False, file_name=''):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
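        # Position codes used below: the second letter is the new position
        # ('l' long, 's' short), so codes ending in 'l' map to +1 and codes
        # ending in 's' map to -1; the first letter appears to be the
        # previous position ('z' = zero).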
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.fill_between(x, df['ps'+c], 0, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
plt.legend()
if make_file:
f_name = file_name+'_position_view.png'
plt.savefig('./image/'+f_name, bbox_inches='tight')
def position_view_bar(self, df, cd, size=(15,1), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
x_ticks = self.time_serial(df)
plt.xticks(x_ticks[0], x_ticks[1])
plt.autoscale(True, axis='x')
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.bar(range(x.size), df['ps'+c], width=1, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' position_view.png', bbox_inches='tight')
def pair_trend_index_view(self, df, trd, cd, size=(15,7), make_file=False, file_name=''):
fig, ax1 = plt.subplots(figsize=size)
x = df.index
ax1.fill_between(x, df[cd[1]+' expected']*(1+trd), df[cd[1]+' expected']*(1-trd), facecolor='sienna', alpha=0.2)
ax1.plot(x, df[cd[1]+' expected'], 'sienna', linestyle='--')
ax1.plot(x, df[cd[1]], 'C1', lw=3)
ax2 = ax1.twinx()
ax2.plot(x, df[cd[0]], 'C0', alpha=0.7)
ax1.plot(np.nan, 'C0', label=cd[0])
ax1.legend(loc=0)
if make_file:
f_name = file_name+'_pair_trend_view.png'
plt.savefig('./image/'+f_name, bbox_inches='tight')
return()
def pair_trend_price_view(self, df, trd, cd, size=(15,7), make_file=False):
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df[cd[1]+' expected']*(1+trd), df[cd[1]+' expected']*(1-trd), facecolor='sienna', alpha=0.2)
plt.plot(x, df[cd[1]+' expected'], 'sienna', linestyle='--')
plt.plot(x, df[cd[0]], 'C0')
plt.plot(x, df[cd[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cd[0]+' pair_trend_price_view.png', bbox_inches='tight')
def BB_trend_view(self, df, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df['lb'], df['ub'], facecolor='sienna', alpha=0.2)
plt.plot(x, df['center'], color='sienna', linestyle='--', label='MA')
plt.plot(x, df[cds[0]], color='C0', linestyle='-', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' bb_trend_view.png', bbox_inches='tight')
def futures_basis_view(self, df, threshold, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.autoscale(True, axis='both')
plt.fill_between(x, df[cds[0]], df[cds[0]]+df['basis'], facecolor='sienna', alpha=0.2)
plt.plot(x, df[cds[0]], 'sienna', linestyle='--')
plt.plot(x, df[cds[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' futures_basis_view.png', bbox_inches='tight')
def value_at_expiry_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s + pd.Series(value)
plt.plot(x, s, linewidth=3, color='red', label='Synthetic')
else:
for key, value in y.items():
plt.plot(x, value, linewidth=3, color='red', label=key)
step = ( x.max() - x.min() + 1 ) / 4
plt.yticks(np.arange(0-step*2, 0+step*3, step))
plt.ylim(0-step*2, 0+step*2)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+' value_at_expiry_view.png', bbox_inches='tight')
def square_one_to_one_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s + pd.Series(value)
plt.plot(x, s, linewidth=3, color='red', label='Synthetic')
else:
for key, value in y.items():
plt.plot(x, value, linewidth=3, color='red', label=key)
step = ( x.max() - x.min() + 1 ) / 4
plt.yticks(np.arange(0-step*2, 0+step*3, step))
plt.ylim(0-step*2, 0+step*2)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+' square_one_to_one_view.png', bbox_inches='tight')
def square_free_plot_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s + pd.Series(value)
plt.plot(x, s, linewidth=3, color='red', label='Synthetic')
else:
for key, value in y.items():
plt.plot(x, value, linewidth=3, color='red', label=key)
plt.legend()
if make_file:
plt.savefig('./image/'+Visualize.today+' square_free_plot_view.png', bbox_inches='tight')
def square_scatter_view(self, x, y, make_file=False, size=(7,7)):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
plt.scatter(x, y, linewidth=3, color='red')
step = ( x.max() - x.min() + 1 ) / 4
plt.legend()
if make_file:
plt.savefig('./image/'+Visualize.today+' square_free_plot_view.png', bbox_inches='tight')
def time_serial(self, df):
chart = pd.DataFrame()
chart = df.copy()
chart.reset_index(inplace=True)
sequence = []
xlabels = []
if isinstance(chart.iloc[0, 0], dt.date):
first = chart.iloc[0, 0]
last = chart.iloc[-1, 0]
delta = last - first
if delta.days >= 730:
time_series = pd.date_range(first, last, freq='YS')
elif delta.days >= 365:
time_series = pd.date_range(first, last, freq='QS')
elif delta.days >= 180:
time_series = pd.date_range(first, last, freq='2MS')
elif delta.days >= 90:
time_series = pd.date_range(first, last, freq='MS')
elif delta.days >= 60:
time_series = pd.date_range(first, last, freq='SMS')
elif delta.days >= 30:
time_series = pd.date_range(first, last, freq='5B')
elif delta.days >= 10:
time_series = pd.date_range(first, last, freq='2B')
elif delta.days >= 5:
time_series = pd.date_range(first, last, freq='D')
else:
time_series = chart.iloc[:, 0]
sequence.append(first)
if delta.days >= 180:
xlabels.append(first.strftime('%y.%m.%d'))
else:
xlabels.append(first.strftime('%m.%d'))
for d in time_series:
d = fs.check_base_date(df, d)
s = chart[chart.iloc[:, 0]==d].iloc[0].tolist()
sequence.append(s[0])
l = d.strftime('%y.%m.%d')
if delta.days >= 180:
l = d.strftime('%y.%m.%d')
else:
l = d.strftime('%m.%d')
xlabels.append(l)
sequence.append(last)
if delta.days >= 180:
xlabels.append(last.strftime('%y.%m.%d'))
else:
xlabels.append(last.strftime('%m.%d'))
if sequence[0] == sequence[1]:
del sequence[0]
del xlabels[0]
if sequence[-1] == sequence[-2]:
del sequence[-1]
del xlabels[-1]
return(sequence, xlabels)
'''
intraday charting
'''
class VisualizeIntraday:
today = '(' + pd.to_datetime('today').date().strftime("%y%m%d") + ') '
def __init__(self):
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.grid'] = True
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['grid.linestyle'] = '--'
plt.rcParams['grid.alpha'] = 0.7
plt.rcParams['lines.antialiased'] = True
plt.rcParams['figure.figsize'] = [15.0, 7.0]
plt.rcParams['savefig.dpi'] = 96
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['figure.titlesize'] = 'medium'
def price_view(self, df, b_date, s_cd, size=(15,7), make_file=False):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
for c in cds:
plt.plot(x, df.loc[b_date:, c], label=c)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' price_view.png', bbox_inches='tight')
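# price_view above thins the x-axis down to roughly ten labelled ticks; the
# same indexing pattern recurs in the views below. A minimal standalone
# sketch of that pattern (the helper name is illustrative, not part of the class):
def _demo_tick_positions(x_length, n_ticks=10):
    jump = max(1, int(x_length / n_ticks))  # guard against short series
    positions = [jump * i for i in range(n_ticks)]
    positions.append(x_length - 1)  # always label the final point
    return positions
# _demo_tick_positions(95) -> [0, 9, 18, ..., 81, 94]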
def index_view(self, df, b_date, s_cd, size=(15,7), make_file=False):
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
cds = fs.str_list(s_cd)
for c in cds:
plt.plot(x, df.loc[b_date:, c] / df.loc[b_date, c] * 100, label=c)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' index_view.png', bbox_inches='tight')
def complex_view(self, df, b_date, cd_set_a, cd_set_b=[], size=(15,7), make_file=False):
cds_a = fs.str_list(cd_set_a)
cds_b = fs.str_list(cd_set_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.fill_between(x, df.loc[b_date:, c], 0, facecolor='C'+str(i), alpha=0.3)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
ax1.set_xticks(np.arange(0, x_length+jump, jump))
ax1.set_xticklabels(xs, rotation=45)
ax2.set_xticks(np.arange(0, x_length+jump, jump))
ax2.set_xticklabels(xs, rotation=45)
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds_a[0]+' complex_view.png', bbox_inches='tight')
def multi_line_view(self, df, b_date, cd_set_a, cd_set_b=[], size=(15,7), make_file=False):
cds_a = fs.str_list(cd_set_a)
cds_b = fs.str_list(cd_set_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c, alpha=0.7)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
ax1.set_xticks(np.arange(0, x_length+jump, jump))
ax1.set_xticklabels(xs, rotation=45)
ax2.set_xticks(np.arange(0, x_length+jump, jump))
ax2.set_xticklabels(xs, rotation=45)
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds_a[0]+' multi_line_view.png', bbox_inches='tight')
def position_view(self, df, s_cd, size=(15,1), make_file=False):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.fill_between(x, df['ps'+c], 0, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' position_view.png', bbox_inches='tight')
def pair_trend_price_view(self, df, thd, s_cd, make_file=False, size=(15,7)):
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df[s_cd[1]+' expected']*(1+thd), df[s_cd[1]+' expected']*(1-thd), facecolor='sienna', alpha=0.2)
plt.plot(x, df[s_cd[1]+' expected'], 'sienna', linestyle='--', label=s_cd[1]+' expected')
plt.plot(x, df[s_cd[0]], 'C0', label=s_cd[0])
plt.plot(x, df[s_cd[1]], 'C1', lw=3, label=s_cd[1])
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+s_cd[0]+' pairs_trend_price_view.png', bbox_inches='tight')
def pair_trend_index_view(self, df, thd, s_cd, make_file=False, size=(15,7)):
fig, ax1 = plt.subplots(figsize=size)
x = df.index
ax1.fill_between(x, df[s_cd[1]+' expected']*(1+thd), df[s_cd[1]+' expected']*(1-thd), facecolor='sienna', alpha=0.2)
ax1.plot(x, df[s_cd[1]+' expected'], 'sienna', linestyle='--')
ax1.plot(x, df[s_cd[1]], 'C1', lw=3)
ax2 = ax1.twinx()
ax2.plot(x, df[s_cd[0]], 'C0', alpha=0.7)
ax1.plot(np.nan, 'C0', label=s_cd[0])
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
ax1.set_xticks(np.arange(0, x_length+jump, jump))
ax1.set_xticklabels(xs, rotation=45)
ax2.set_xticks(np.arange(0, x_length+jump, jump))
ax2.set_xticklabels(xs, rotation=45)
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+s_cd[0]+' pairs_trend_index_view.png', bbox_inches='tight')
def BB_trend_view(self, sample, sigma, s_cd, make_file=False, size=(15,7)):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = sample.index
plt.fill_between(x, sample['lb'], sample['ub'], facecolor='sienna', alpha=0.2)
plt.plot(x, sample['center'], color='sienna', linestyle='--', label='MA')
plt.plot(x, sample[cds[0]], color='C0', linestyle='-', lw=3)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
"""peca summary table and plots"""
from collections import Counter
from pathlib import Path
from sys import stdout
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
NPROT = sum(1 for line in open("X.txt"))-1
NSAMPLES = sum(1 for line in open("s_RR"))
NREPS, NTIME = (int(x) for x in open("attr.txt").readline().rstrip().split('\t')[:2])
#print(NREPS, NTIME)
RR = np.zeros((NPROT, NTIME-1))
for line in (l.rstrip().split('\t') for l in open("s_RR")):
RR += np.log(np.array(line, dtype=float)).reshape(NPROT, NTIME-1)
RR = np.exp(RR/NSAMPLES)
CPR = np.zeros((NPROT, NTIME-2))
for line in (l.rstrip().split('\t') for l in open("s_CPR")):
CPR += np.array(line, dtype=int).reshape(NPROT, NTIME-2)
CPR /= NSAMPLES
DD = np.zeros((NPROT, NTIME-1))
for line in (l.rstrip().split('\t') for l in open("s_DD")):
DD += np.log(np.array(line, dtype=float)).reshape(NPROT, NTIME-1)
DD = np.exp(DD/NSAMPLES)
CPD = np.zeros((NPROT, NTIME-2))
for line in (l.rstrip().split('\t') for l in open("s_CPD")):
CPD += np.array(line, dtype=int).reshape(NPROT, NTIME-2)
CPD /= NSAMPLES
### append the fdr columns
#for syn
CNT = Counter(CPR.flatten())
SORTKEY = sorted(CNT, reverse=True)
FDRMAP = dict()
DENOM = CNT[SORTKEY[0]]
NUMER = (1-SORTKEY[0])*DENOM
for key in SORTKEY:
FDRMAP[key] = NUMER/DENOM
NUMER += (1-key)*CNT[key]
DENOM += CNT[key]
#for deg
CNT = Counter(CPD.flatten())
SORTKEY = sorted(CNT, reverse=True)
FDRMAP_D = dict()
DENOM = CNT[SORTKEY[0]]
NUMER = (1-SORTKEY[0])*DENOM
for key in SORTKEY:
FDRMAP_D[key] = NUMER/DENOM
NUMER += (1-key)*CNT[key]
DENOM += CNT[key]
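# The two blocks above turn change-point posteriors into FDR estimates: a
# call with posterior p carries local fdr (1 - p), and the reported FDR at a
# threshold is the running average of local fdrs over all calls at or above
# that threshold. A hedged toy re-run of the same loop (counts are made up):
def _demo_fdr_map():
    cnt = Counter({0.99: 5, 0.9: 10, 0.5: 20})
    sortkey = sorted(cnt, reverse=True)
    fdrmap = dict()
    denom = cnt[sortkey[0]]
    numer = (1 - sortkey[0]) * denom
    for key in sortkey:
        fdrmap[key] = numer / denom
        numer += (1 - key) * cnt[key]
        denom += cnt[key]
    return fdrmap  # fdrmap[0.9] is the FDR if calls with posterior >= 0.9 pass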
MPATH = Path("M.txt").is_file()
with open('normX.txt') as xX, \
open('normH.txt') as hH, \
(open('normM.txt') if MPATH else open('normH.txt')) as mM, \
open('data_R_CPS.txt', 'w') as cp_:
#cp_.write(xX.readline().rstrip().split('\t', 1)[1] \
# +('\t'+mM.readline().rstrip().split('\t', 1)[1] if MPATH else '') \
# +'\t'+hH.readline().rstrip().split('\t', 1)[1] \
cp_.write('\t'.join(z+"_X"+str(int(n/NTIME))+"t"+str(n%NTIME) \
for n, z in enumerate(xX.readline().rstrip().split('\t')[1:])) \
+('\t'+'\t'.join(z+"_M"+str(int(n/NTIME))+"t"+str(n%NTIME) \
for n, z in enumerate(mM.readline().rstrip().split('\t')[1:])) if MPATH else '') \
+'\t'+'\t'.join(z+"_Y"+str(int(n/NTIME))+"t"+str(n%NTIME) \
for n, z in enumerate(hH.readline().rstrip().split('\t')[1:])) \
+'\t'+'\t'.join(['R'+str(i) for i in range(NTIME-1)]) \
+'\t'+'\t'.join(['D'+str(i) for i in range(NTIME-1)]) \
+'\t'+'\t'.join(['signedCPS'+str(i) for i in range(1, NTIME-1)]) \
+'\t'+'\t'.join(['signedCPD'+str(i) for i in range(1, NTIME-1)]) \
+'\t'+'\t'.join(['FDR_S'+str(i) for i in range(1, NTIME-1)]) \
+'\t'+'\t'.join(['FDR_D'+str(i) for i in range(1, NTIME-1)]) \
+'\n')
for i in range(NPROT):
cp_.write(xX.readline().rstrip() \
+('\t'+mM.readline().rstrip().split('\t', 1)[1] if MPATH else '') \
+'\t'+hH.readline().rstrip().split('\t', 1)[1] \
+'\t'+'\t'.join(str(x) for x in RR[i,]) \
+'\t'+'\t'.join(str(x) for x in DD[i,]) \
+'\t'+'\t'.join(str(x*(1 if RR[i, n] > RR[i, n-1] else -1)) \
for n, x in enumerate(CPR[i,], 1)) \
+'\t'+'\t'.join(str(x*(1 if DD[i, n] > DD[i, n-1] else -1)) \
for n, x in enumerate(CPD[i,], 1)) \
+'\t'+'\t'.join(str(FDRMAP[x]) for x in CPR[i,]) \
+'\t'+'\t'.join(str(FDRMAP_D[x]) for x in CPD[i,]) \
+'\n')
###loglikelihood traceplot########################
#plt.figure().set_size_inches(9, 9)
plt.title('loglikelihood traceplot')
plt.plot(range(1, NSAMPLES+1), [float(x) for x in open('s_loglike').readlines()])
plt.savefig('trace_loglike.pdf')
##################################################
EFSH = dict()
XAX = []
with open('EfsH.txt') as efsh:
XAX = efsh.readline().rstrip('\n').split('\t')[3:]
for line in (l.rstrip('\n').split('\t') for l in efsh):
EFSH['\t'.join(line[:3])] = line[3:]
EFSM = dict()
if MPATH:
with open('EfsM.txt') as efsm:
efsm.readline()
for line in (l.rstrip('\n').split('\t') for l in efsm):
EFSM['\t'.join(line[:3])] = line[3:]
EFSX = dict()
EFSXPATH = Path("EfsX.txt").is_file()
if EFSXPATH:
with open('EfsX.txt') as efsx:
efsx.readline()
for line in (l.rstrip('\n').split('\t') for l in efsx):
EFSX['\t'.join(line[:3])] = line[3:]
ETAH = np.array(open('mean_etaH').readline().rstrip().split('\t'), \
dtype=float).reshape(NPROT, NTIME*NREPS)
if MPATH:
ETAM = np.array(open('mean_etaM').readline().rstrip().split('\t'), \
dtype=float).reshape(NPROT, NTIME*NREPS)
with PdfPages('mRNAprot.pdf') as pdf, \
open('X.txt') as xx, \
open('H.txt') as hh, \
(open('M.txt') if MPATH else open('H.txt')) as mm, \
open('normX.txt') as xX, \
open('normH.txt') as hH, \
(open('normM.txt') if MPATH else open('normH.txt')) as mM:
PR = 2
LOGHY = 'log(protein) '
if MPATH:
PR = 3
mm.readline()
mM.readline()
LOGHY = 'log(H) '
xx.readline()
hh.readline()
xX.readline()
hH.readline()
for p, linex in enumerate(xx):
x = [(i if i != 'NA' else np.nan) for i in linex.rstrip().split('\t')[1:]]
x = np.array(x, dtype=float)
X = np.array(xX.readline().rstrip().split('\t')[1:], dtype=float)
h = [(i if i != 'NA' else np.nan) for i in hh.readline().rstrip().split('\t')[1:]]
h = np.array(h, dtype=float)
H = np.array(hH.readline().rstrip().split('\t')[1:], dtype=float)
if MPATH:
m = [(i if i != 'NA' else np.nan) for i in mm.readline().rstrip().split('\t')[1:]]
m = np.array(m, dtype=float)
M = np.array(mM.readline().rstrip().split('\t')[1:], dtype=float)
#if p > 10:
# break
plt.figure(figsize=((NREPS+2)*4, PR*4))
plt.suptitle(linex.rstrip().split('\t', 1)[0])
print('\x08'*99, p, '/', NPROT, end=' ')
stdout.flush()
for j in range(NREPS):
plt.subplot(PR, NREPS+2, j+1).set_title('log(mRNA) '+str(j+1))
plt.ylim(np.nanmin(x), np.nanmax(x))
plt.scatter(range(NTIME), x[NTIME*j:NTIME*(j+1)], c='k')
plt.scatter(range(NTIME), X[NTIME*j:NTIME*(j+1)], \
facecolors='none', \
edgecolors=np.where(np.isnan(x[NTIME*j:NTIME*(j+1)]), 'red', 'black'))
if EFSXPATH:
plt.plot(XAX, EFSX[str(p)+'\t0\t'+str(j)], 'k-')
plt.subplot(PR, NREPS+2, NREPS+1).set_title('Protein synthesis')
plt.xlim(0, NTIME-1)
plt.step(np.arange(0.5, NTIME-0.5), RR[p,], 'k-', where='mid')
plt.ylim(0)
plt.subplot(PR, NREPS+2, NREPS+2).set_title('Protein degradation')
plt.xlim(0, NTIME-1)
plt.step(np.arange(0.5, NTIME-0.5), DD[p,], 'k-', where='mid')
plt.ylim(0)
for j in range(NREPS):
plt.subplot(PR, NREPS+2, NREPS+3+j).set_title(LOGHY+str(j+1))
plt.ylim(np.nanmin(h), np.nanmax(h))
#https://gitlab.com/custom_robots/spotmicroai/simulation/-/blob/master/Basic%20simulation%20by%20user%20Florian%20Wilk/Kinematics/Kinematic.ipynb
from mpl_toolkits import mplot3d
import numpy as np
from math import *
import matplotlib.pyplot as plt
def setupView(limit):
ax = plt.axes(projection="3d")
ax.set_xlim(-limit, limit)
ax.set_ylim(-limit, limit)
ax.set_zlim(-limit, limit)
ax.set_xlabel("X")
ax.set_ylabel("Z")
ax.set_zlabel("Y")
return ax
setupView(200).view_init(elev=12., azim=28)
omega = pi/4
phi = 0
psi = 0
xm = 0
ym = 0
zm = 0
L = 207.5
W = 78
l1=60.5
l2=10
l3=100.7
l4=118.5
Lp=np.array([[100,-100,100,1],[100,-100,-100,1],[-100,-100,100,1],[-100,-100,-100,1]])
sHp=np.sin(pi/2)
cHp=np.cos(pi/2)
Lo=np.array([0,0,0,1])
def bodyIK(omega,phi,psi,xm,ym,zm):
Rx = np.array([[1,0,0,0],
[0,np.cos(omega),-np.sin(omega),0],
[0,np.sin(omega), np.cos(omega),0],
[0,0,0,1]])
#Copyright (C) 2021 <NAME>, <NAME>, University of California, Berkeley
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../external'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import numpy as np
#np.random.seed(42)
from vtk_utils.vtk_utils import *
from pre_process import *
import argparse
from datetime import datetime
import scipy.sparse as sp
import SimpleITK as sitk  # assumed import: sitk is used in process_template below
import pickle
from scipy.sparse.linalg import eigsh
def build_transform_matrix(image):
matrix = np.eye(4)
matrix[:-1,:-1] = np.matmul(np.reshape(image.GetDirection(), (3,3)), np.diag(image.GetSpacing()))
matrix[:-1,-1] = np.array(image.GetOrigin())
return matrix
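# build_transform_matrix assembles the usual index-to-physical affine
# M = [R * diag(spacing) | origin; 0 0 0 1]. A hedged sketch with a stand-in
# image object (_FakeImage mimics the three SimpleITK getters it relies on):
def _demo_transform_matrix():
    class _FakeImage:
        def GetDirection(self): return (1, 0, 0, 0, 1, 0, 0, 0, 1)
        def GetSpacing(self): return (0.5, 0.5, 2.0)
        def GetOrigin(self): return (-10.0, -10.0, 0.0)
    m = build_transform_matrix(_FakeImage())
    # voxel index (2, 2, 1) maps to physical point (-9, -9, 2)
    return np.matmul(m, np.array([2., 2., 1., 1.]))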
def map_polydata_coords(poly, displacement, transform, size):
coords = vtk_to_numpy(poly.GetPoints().GetData())
coords += displacement
coords = np.concatenate((coords,np.ones((coords.shape[0],1))), axis=-1)
coords = np.matmul(np.linalg.inv(transform), coords.transpose()).transpose()[:,:3]
coords /= np.array(size)
return coords
def transform_polydata(poly, displacement, transform, size):
coords = map_polydata_coords(poly, displacement, transform, size)
poly.GetPoints().SetData(numpy_to_vtk(coords))
return poly
def get_image_patch(image_py, coords):
"""
return a patch of the image defined under coords, the coords should be in [0,1]R^3
"""
dim_x, dim_y, dim_z = image_py.shape
indices = coords * np.array([[dim_x, dim_y, dim_z]])
x1 = np.floor(indices[:,0]).astype(int)
y1 = np.floor(indices[:,1]).astype(int)
z1 = np.floor(indices[:,2]).astype(int)
x2 = np.ceil(indices[:,0]).astype(int)
y2 = np.ceil(indices[:,1]).astype(int)
z2 = np.ceil(indices[:,2]).astype(int)
q11 = image_py[x1, y1, z1]
q21 = image_py[x2, y1, z1]
q12 = image_py[x1, y2, z1]
q22 = image_py[x2, y2, z1]
wx = indices[:, 0] - x1
wx2 = x2 - indices[:, 0]
lerp_x1 = q21 * wx + q11 * wx2
lerp_x2 = q22 * wx + q12 * wx2
wy = indices[:, 1] - y1
wy2 = y2 - indices[:, 1]
lerp_y1 = lerp_x2 * wy + lerp_x1 * wy2
q112 = image_py[x1, y1, z2]
q212 = image_py[x2, y1, z2]
q122 = image_py[x1, y2, z2]
q222 = image_py[x2, y2, z2]
lerp_x12 = q212 * wx + q112 * wx2
lerp_x22 = q222 * wx + q122 * wx2
lerp_y12 = lerp_x22 * wy + lerp_x12 * wy2
wz = indices[:, 2] - z1
wz2 = z2 - indices[:,2]
lerp_z = lerp_y12 * wz + lerp_y1 * wz2
return lerp_z
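# On a ramp image f(x, y, z) = x, trilinear interpolation reproduces the
# scaled x coordinate exactly, which makes a quick sanity check for
# get_image_patch. Hedged sketch (dimensions and coords are arbitrary):
def _demo_trilinear():
    dim = 8
    img = np.tile(np.arange(dim, dtype=float)[:, None, None], (1, dim, dim))
    coords = np.array([[0.4, 0.3, 0.2], [0.7, 0.1, 0.8]])
    # indices are coords * dim, so the expected values are 3.2 and 5.6
    return get_image_patch(img, coords)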
def make_grid_vtk(ctrl_points, diagonal=True):
# assume equal number of control points along each dim
num_pts = int(round(len(ctrl_points)**(1/3)))
# points
grid = vtk.vtkPolyData()
vtk_points = vtk.vtkPoints()
vtk_points.SetData(numpy_to_vtk(ctrl_points))
grid.SetPoints(vtk_points)
# edges
lines = vtk.vtkCellArray()
for i in range(num_pts):
for j in range(num_pts):
for k in range(num_pts-1):
id1 = i*num_pts*num_pts+j*num_pts +k
ids = []
ids.append(i*num_pts*num_pts+j*num_pts +k+1)
if diagonal:
if j<num_pts-1:
ids.append(i*num_pts*num_pts+(j+1)*num_pts +k+1)
if i < num_pts-1:
ids.append((i+1)*num_pts*num_pts+(j+1)*num_pts +k+1)
if i >0:
ids.append((i-1)*num_pts*num_pts+(j+1)*num_pts +k+1)
if j>0:
ids.append(i*num_pts*num_pts+(j-1)*num_pts +k+1)
if i < num_pts-1:
ids.append((i+1)*num_pts*num_pts+(j-1)*num_pts +k+1)
if i >0:
ids.append((i-1)*num_pts*num_pts+(j-1)*num_pts +k+1)
#if i<num_pts-1:
# ids.append((i+1)*num_pts*num_pts+(j+1)*num_pts +k)
for id_p in ids:
line = vtk.vtkLine()
line.GetPointIds().SetId(0, id1)
line.GetPointIds().SetId(1, id_p)
lines.InsertNextCell(line)
for i in range(num_pts):
for j in range(num_pts-1):
for k in range(num_pts):
id1 = i*num_pts*num_pts+j*num_pts +k
ids = []
ids.append(i*num_pts*num_pts+(j+1)*num_pts +k)
if diagonal:
if i<num_pts-1:
ids.append((i+1)*num_pts*num_pts+(j+1)*num_pts +k)
if i>0:
ids.append((i-1)*num_pts*num_pts+(j+1)*num_pts +k)
for id_p in ids:
line = vtk.vtkLine()
line.GetPointIds().SetId(0, id1)
line.GetPointIds().SetId(1, id_p)
lines.InsertNextCell(line)
for i in range(num_pts-1):
for j in range(num_pts):
for k in range(num_pts):
id1 = i*num_pts*num_pts+j*num_pts +k
ids = []
ids.append((i+1)*num_pts*num_pts+j*num_pts +k)
if diagonal:
if k<num_pts-1:
ids.append((i+1)*num_pts*num_pts+j*num_pts +k+1)
if k>0:
ids.append((i+1)*num_pts*num_pts+j*num_pts +k-1)
for id_p in ids:
line = vtk.vtkLine()
line.GetPointIds().SetId(0, id1)
line.GetPointIds().SetId(1, id_p)
lines.InsertNextCell(line)
grid.SetLines(lines)
return grid
def make_grid(num_pts, bounds, diagonal=True):
# compute bounding box of the template
min_bound, max_bound = bounds
# create control points
x = np.linspace(min_bound[0], max_bound[0], num_pts, endpoint=True)
y = np.linspace(min_bound[1], max_bound[1], num_pts, endpoint=True)
z = np.linspace(min_bound[2], max_bound[2], num_pts, endpoint=True)
# create vtk polydata
u, v, w = np.meshgrid(x, y, z, indexing='ij')
coords = np.column_stack((u.flatten(), v.flatten(), w.flatten()))
grid = make_grid_vtk(coords, diagonal)
#write_vtk_polydata(grid, os.path.join(os.path.dirname(__file__), 'grid_pts{}.vtk'.format(num_pts)))
return grid
def load_geometry_from_file(fn, target_node_num):
template = load_vtk_mesh(fn)
try:
region_ids = np.unique(vtk_to_numpy(template.GetCellData().GetArray('Scalars_'))).astype(int)
except:
region_ids = np.unique(vtk_to_numpy(template.GetCellData().GetArray('RegionId'))).astype(int)
print("Unique ids of template mesh: ", region_ids)
struct_list = []
node_list = [0]
total_node = 0
face_list = []
region_id = []
for i in region_ids:
poly_i = thresholdPolyData(template, 'Scalars_', (i, i),'cell')
if poly_i.GetNumberOfPoints() == 0:
poly_i = thresholdPolyData(template, 'RegionId', (i, i),'cell')
num_pts = poly_i.GetNumberOfPoints()
rate = max(0., 1. - float(target_node_num)/num_pts)
print("Target reduction rate of structure: ", i, target_node_num, num_pts, rate)
poly_i = decimation(poly_i, rate)
total_node += poly_i.GetNumberOfPoints()
node_list.append(total_node)
struct_list.append(poly_i)
cells = vtk_to_numpy(poly_i.GetPolys().GetData())
cells = cells.reshape(poly_i.GetNumberOfCells(), 4)
cells = cells[:,1:]
region_id += list(np.ones(poly_i.GetNumberOfCells())*i)
face_list.append(cells)
template_deci = appendPolyData(struct_list)
region_id_vtk = numpy_to_vtk(region_id)
region_id_vtk.SetName('Scalars_')
template_deci.GetCellData().AddArray(region_id_vtk)
return template_deci, node_list, face_list
def process_template(template_fn, target_node_num=None, template_im_fn=None, ref_template_fn=None):
if target_node_num is None:
template = load_vtk_mesh(template_fn)
node_list = [template.GetNumberOfPoints()]
face_list = vtk_to_numpy(template.GetPolys().GetData()).reshape(template.GetNumberOfCells(), 4)[:, 1:]
else:
template, node_list, face_list = load_geometry_from_file(template_fn, target_node_num)
if template_im_fn is None:
coords = vtk_to_numpy(template.GetPoints().GetData())
if ref_template_fn is not None:
ref = load_vtk_mesh(ref_template_fn)
ref_coords = vtk_to_numpy(ref.GetPoints().GetData())
ref_mean = np.mean(ref_coords, axis=0)
coords -= ref_mean
ref_coords -= ref_mean
ref_nrm = np.max(np.linalg.norm(ref_coords, axis=1))
coords /= ref_nrm * 1.8
ref_coords /= ref_nrm * 1.8
ref_coords += np.array([0.5, 0.5, 0.5])
else:
mean = np.mean(coords, axis=0)
coords -= mean
coords /= np.max(np.linalg.norm(coords, axis=1)) * 1.8
coords += np.array([0.5, 0.5, 0.5])
template.GetPoints().SetData(numpy_to_vtk(coords))
else:
SIZE = (128, 128, 128)
imgVol_o = sitk.ReadImage(template_im_fn)
img_center = np.array(imgVol_o.TransformContinuousIndexToPhysicalPoint(np.array(imgVol_o.GetSize())/2.0))
imgVol = resample_spacing(imgVol_o, template_size=SIZE, order=1)[0] # numpy array
img_center2 = np.array(imgVol.TransformContinuousIndexToPhysicalPoint(np.array(imgVol.GetSize())/2.0))
transform = build_transform_matrix(imgVol)
template = transform_polydata(template, img_center2-img_center, transform, SIZE)
coords = vtk_to_numpy(template.GetPoints().GetData())
#write_vtk_polydata(template, os.path.join(os.path.dirname(__file__), datetime.now().strftime("%m_%d_%Y_%H_%M_%S")+'_template_'+os.path.basename(template_fn)))
write_vtk_polydata(template, os.path.join(os.path.dirname(__file__), '../examples/template_with_veins_normalized.vtp'))
if ref_template_fn is not None:
bounds = (np.min(ref_coords, axis=0), np.max(ref_coords, axis=0))
else:
bounds = (np.min(coords, axis=0), np.max(coords, axis=0))
return template, node_list, face_list, bounds
from math import factorial
def comb(n, k):
return factorial(n) / factorial(k) / factorial(n - k)
def ffd(ctrl_pts, tmplt_coords, bounds):
'''
Control points (or control-point displacements) should be in world coordinates.
tmplt_coords is in world coordinates and will be normalized to grid coordinates.
'''
min_bound, max_bound = bounds
tmplt_coords = tmplt_coords - np.expand_dims(min_bound, axis=0)
tmplt_coords /= np.expand_dims(max_bound - min_bound, axis=0)
num_pts = int(round(len(ctrl_pts)**(1/3)))
# Bernstein tensor
B = []
for i in range(num_pts):
for j in range(num_pts):
for k in range(num_pts):
coeff = comb(num_pts-1, k) * comb(num_pts-1, j) * comb(num_pts-1, i)
b_list = coeff * ((1 - tmplt_coords[:,0]) ** (num_pts-1 - i)) * (tmplt_coords[:,0] ** i) \
* ((1 - tmplt_coords[:,1]) ** (num_pts-1 - j)) * (tmplt_coords[:,1] ** j)\
* ((1 - tmplt_coords[:,2]) ** (num_pts-1 - k)) * (tmplt_coords[:,2] ** k)
B.append(b_list)
B = np.stack(B, axis=1)
B[B<1e-5] = 0.
s_B = sp.csr_matrix(B, copy=True)
print("Number of elements in grid matrix: ", len(sparse_to_tuple(s_B)[1]))
output = s_B.dot(ctrl_pts)
#output = np.matmul(B, ctrl_pts)
return output, sparse_to_tuple(s_B)
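# Bernstein FFD has linear precision: with control points sitting on the
# undeformed lattice, every template point maps (almost) to itself, up to
# the 1e-5 truncation applied to B above. Hedged sanity sketch:
def _demo_ffd_identity():
    bounds = (np.zeros(3), np.ones(3))
    axis = np.linspace(0., 1., 4, endpoint=True)
    u, v, w = np.meshgrid(axis, axis, axis, indexing='ij')
    ctrl = np.column_stack((u.flatten(), v.flatten(), w.flatten()))
    pts = np.random.rand(10, 3) * 0.8 + 0.1  # keep points inside the bounds
    out, _ = ffd(ctrl, pts, bounds)
    return np.max(np.abs(out - pts))  # ~0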
def construct_bspline_volume(ctrl_pts, tmplt_coords, bounds, order=3):
min_bound, max_bound = bounds
num_pts = int(round(len(ctrl_pts)**(1/3)))
# create knot vectors
u, v, w = [], [], []
for i in range(num_pts+order+1):
coeff = min(max(0, i-order), num_pts-order)
u.append(min_bound[0] + coeff*(max_bound[0]-min_bound[0])/(num_pts-order))
v.append(min_bound[1] + coeff*(max_bound[1]-min_bound[1])/(num_pts-order))
w.append(min_bound[2] + coeff*(max_bound[2]-min_bound[2])/(num_pts-order))
#print("knots: ", u)
#print("knots: ", v)
#print("knots: ", w)
return construct_bspline_matrix(ctrl_pts, tmplt_coords, u, v, w, order)
def construct_bspline_matrix(ctrl_pts, tmplt_coords, u, v, w, order=3):
def _compute_basis(x, t, i, p):
if p == 0:
b = np.where((x >= t[i]-1e-5) & (x <= t[i+1]+1e-5), 1., 0.)
#b = np.where((x >= t[i]) & (x <= t[i+1]), 1., 0.)
return b
seg_i = t[i+p] - t[i]
seg_ip1 = (t[i+p+1] - t[i+1])
if np.isclose(seg_i, 0.):
left = np.zeros(x.shape)
else:
left = (x - t[i])/seg_i * _compute_basis(x, t, i, p-1)
if np.isclose(seg_ip1, 0.):
right = np.zeros(x.shape)
else:
right = (t[i+p+1] - x)/(t[i+p+1] - t[i+1]) * _compute_basis(x, t, i+1, p-1)
b = left + right
return b
num_pts = int(round(len(ctrl_pts)**(1/3)))
B = []
for i in range(num_pts):
for j in range(num_pts):
for k in range(num_pts):
basis_u = _compute_basis(tmplt_coords[:,0], u, i, order)
basis_v = _compute_basis(tmplt_coords[:,1], v, j, order)
basis_w = _compute_basis(tmplt_coords[:,2], w, k, order)
b_list = basis_u * basis_v * basis_w
B.append(b_list)
B = np.stack(B, axis=1)
if np.any(np.sum(B, axis=-1)==0):
raise RuntimeError("Zero row in the B-spline matrix: a template point lies outside the knot span.")
#np.set_printoptions(threshold=np.inf)
#print(B)
B /= np.sum(B, axis=-1, keepdims=True)
B[B<1e-5] = 0.
B[np.isnan(B)] = 0.
#print("Check NaN: ", np.any(np.isnan(B)))
#print("Check Inf: ", np.any(np.isinf(B)))
#print(B)
s_B = sp.csr_matrix(B, copy=True)
print("Number of elements in grid matrix: ", len(sparse_to_tuple(s_B)[1]))
return s_B
def bspline(Basis_matrix, curr_grid, order=3):
if type(Basis_matrix)==tuple:
Basis_matrix = sp.csr_matrix((Basis_matrix[1], (Basis_matrix[0][:,0], Basis_matrix[0][:,1])), shape=Basis_matrix[-1])
output = Basis_matrix.dot(curr_grid)
return output
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
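# sparse_to_tuple converts a scipy sparse matrix into the ((row, col)
# coordinate array, value array, shape) triple used throughout this file.
# Hedged sketch:
def _demo_sparse_to_tuple():
    mat = sp.csr_matrix(np.array([[0., 2.], [3., 0.]]))
    coords, values, shape = sparse_to_tuple(mat)
    # coords -> [[0, 1], [1, 0]], values -> [2., 3.], shape -> (2, 2)
    return coords, values, shape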
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
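# preprocess_adj applies the GCN-style renormalization
# D^{-1/2} (A + I) D^{-1/2}. For a two-node graph with one edge, A + I is
# all ones, every degree is 2, and every normalized entry becomes 1/2.
# Hedged sketch:
def _demo_preprocess_adj():
    adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
    coords, values, shape = preprocess_adj(adj)
    return values  # four entries, each 0.5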
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
def transition_matrix_for_multi_level_grid(grid1, grid2, inverse=False):
"""
build a matrix B such that grid2 = B grid1
we assume grid2 is denser than grid1
if inverse, we can compute the left inverse (B^TB)^-1B^T
"""
grid1_min, grid1_max = np.min(grid1, axis=0, keepdims=True), np.max(grid1, axis=0, keepdims=True)
grid2_min, grid2_max = np.min(grid2, axis=0, keepdims=True), np.max(grid2, axis=0, keepdims=True)
grid2_nrmed = (grid2 - grid2_min)/grid2_max
grid1_nrmed = (grid1 - grid1_min)/grid1_max
# find steps
x_step = np.unique(grid1_nrmed[:, 0])
y_step = np.unique(grid1_nrmed[:, 1])
z_step = np.unique(grid1_nrmed[:, 2])
num_x, num_y, num_z = len(x_step), len(y_step), len(z_step)
steps = [x_step[1]-x_step[0], y_step[1]-y_step[0], z_step[1]-z_step[0]]
indices = np.round(grid2_nrmed/np.array(steps), decimals=5)
B = np.zeros((grid2_nrmed.shape[0], grid1_nrmed.shape[0]))
ind_f = np.floor(indices)
ind_c = np.ceil(indices)
ind_f = np.where(ind_f==ind_c, ind_f-1., ind_f)
mask = ind_f<0
ind_f[mask] = 0.
ind_c[mask] =1.
ind_corners = [ind_f, ind_c]
w_f = ind_c - indices
w_c = indices - ind_f
weight_corners = [w_f, w_c]
for i in range(len(ind_corners)):
x_comp = ind_corners[i][:,0]*num_y*num_z
for j in range(len(ind_corners)):
y_comp = ind_corners[j][:,1]*num_z
for k in range(len(ind_corners)):
z_comp = ind_corners[k][:,2]
ind = x_comp + y_comp + z_comp
weight = weight_corners[i][:,0]*weight_corners[j][:,1]*weight_corners[k][:,2]
B[range(grid2_nrmed.shape[0]), ind.astype(int)] = weight
# debug:
test = np.sum(np.matmul(B, grid1) - grid2)
print("Test error: ", test)
if inverse:
inv = np.linalg.inv(np.matmul(B.transpose(), B))
B = np.matmul(inv, B.transpose())
test = np.sum(np.matmul(B, grid2) - grid1)
print("Inverse test error: ", test)
return B
else:
s_B = sp.csr_matrix(B, copy=True)
print("Number of elements in upsample matrix: ", len(sparse_to_tuple(s_B)[1]))
return sparse_to_tuple(s_B)
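# transition_matrix_for_multi_level_grid computes trilinear weights that
# express a dense grid in terms of a coarse one. For the unit-cube corners
# plus their midpoint, the midpoint row holds eight weights of 1/8.
# Hedged sketch:
def _demo_grid_transition():
    axis = np.array([0., 1.])
    u, v, w = np.meshgrid(axis, axis, axis, indexing='ij')
    coarse = np.column_stack((u.flatten(), v.flatten(), w.flatten()))
    dense = np.vstack((coarse, np.array([[0.5, 0.5, 0.5]])))
    coords, values, shape = transition_matrix_for_multi_level_grid(coarse, dense)
    return coords, values, shape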
def transition_matrix_for_multi_level_grid_gaussion(grid1, grid2):
"""
build a matrix B such that grid2 = B grid1
we assume grid2 is denser than grid1
"""
grid1_min, grid1_max = np.min(grid1, axis=0, keepdims=True), np.max(grid1, axis=0, keepdims=True)
grid2_min, grid2_max = np.min(grid2, axis=0, keepdims=True), np.max(grid2, axis=0, keepdims=True)
grid2_nrmed = (grid2 - grid2_min)/grid2_max
grid1_nrmed = (grid1 - grid1_min)/grid1_max
# find steps
x_step = np.unique(grid2_nrmed[:, 0])
y_step = np.unique(grid2_nrmed[:, 1])
z_step = np.unique(grid2_nrmed[:, 2])
num_x, num_y, num_z = len(x_step), len(y_step), len(z_step)
B = np.zeros((grid2.shape[0], grid1.shape[0]))
# assume the grid distribution is uniform
x_space = np.mean(x_step[1:] - x_step[:-1])/3.
y_space = np.mean(y_step[1:] - y_step[:-1])/3.
z_space = np.mean(z_step[1:] - z_step[:-1])/3.
co_var = np.diag([x_space, y_space, z_space])
inv_co_var = np.linalg.inv(co_var)
for i in range(grid2.shape[0]):
curr_pt = np.expand_dims(grid2[i, :], axis=0)
prob = np.sum(np.matmul(grid1-curr_pt, inv_co_var)*(grid1-curr_pt), axis=-1)
B[i,:] = np.squeeze(prob)
B = np.exp(-0.5*B)/np.sqrt((2*np.pi)**3*np.linalg.det(co_var))
B = B/np.max(B, axis=-1, keepdims=True)
rej = B * np.random.rand(*B.shape)
import numpy as np
import time
def timemeasure(func):
def wrapper(*args, **kargs):
start_time = time.perf_counter()
result = func(*args, **kargs)
end_time = time.perf_counter()
execution_time = end_time - start_time
print(f'Proc-time: {execution_time}')
return result
return wrapper
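# timemeasure is an ordinary decorator: it times a single call with
# perf_counter, prints the elapsed seconds, and passes the result through.
# Usage sketch (the decorated function is only an example):
@timemeasure
def _demo_slow_sum(n):
    return sum(range(n))
# _demo_slow_sum(10**6) prints "Proc-time: ..." and returns 499999500000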
class NMF2D():
def __init__(self, n_basis, n_frames, n_pitches, n_iter,
init_W=None, H_sparsity=0.0):
self.n_basis = n_basis
self.n_frames = n_frames
self.n_pitches = n_pitches
self.n_iter = n_iter
self.init_W = init_W
self.err = [0.0 for k in range(0, n_iter)]
self.eps = np.spacing(1)
self.H_penalty = H_sparsity
self.H_norm_order = 0.5
def __init_WH(self, V):
self.Vmax = np.max(V)
self.Ones = np.ones(V.shape)
self.n_row, self.n_col = V.shape
init_H = 0.5 + 0.5*np.random.random((self.n_basis, self.n_pitches, self.n_col))
init_W = 0.5*np.random.random((self.n_row, self.n_basis, self.n_frames))
init_W[:,:,0] = 0.5*np.ones((self.n_row, self.n_basis))
return init_W, init_H
def __W_regularization(self, W, order=2):
return 0.0#np.tile(self.W_penalty*np.linspace(0, 1.0, self.n_frames)**order, (self.n_row, self.n_basis, 1))
def __H_regularization(self, H):
return self.H_penalty * self.__norm(H, (self.H_norm_order-2))
def __update_W(self, V, W, H, order=2.0):
VL, _ = self.__compute_VL(V, W, H)
W_num, W_denom = np.zeros(W.shape), np.zeros(W.shape)
W_penalty = self.__W_regularization(W)
for t in range(0, self.n_frames):
for p in range(0, self.n_pitches):
VLp = self.__shift(VL, p, "up")
HtpT = self.__shift(H[:,p,:], t, "right").T
W_num[:,:,t] += np.dot(VLp, HtpT)
W_denom[:,:,t] += np.dot(self.Ones, HtpT)
W_new = np.clip(W*(W_num / (W_denom + self.eps) + W_penalty), 0.0, self.Vmax)
return W_new
def __update_H(self, V, W, H):
VL, _ = self.__compute_VL(V, W, H)
H_num, H_denom = np.zeros(H.shape), np.zeros(H.shape)
H_penalty = self.__H_regularization(H)
for p in range(0, self.n_pitches):
for t in range(0, self.n_frames):
VLt = self.__shift(VL, t, "left")
WtT = self.__shift(W[:,:,t], p, "down").T
H_num[:,p,:] += np.dot(WtT, VLt)
H_denom[:,p,:] += np.dot(WtT, self.Ones)
H_new = np.clip(H*(H_num / (H_denom + H_penalty + self.eps)), 0.0, self.Vmax)
return H_new
def __norm(self, X, order):
return np.sum(np.abs(X)**order)**(1.0/order)
def __loss(self, V, W, H):
VL, L = self.__compute_VL(V, W, H)
Ckl = V * np.nan_to_num(np.log(VL)) - V + L
W_reg = 0.0#self.__norm(self.__W_regularization(), 2)
H_reg = self.H_penalty * self.__norm(H, (self.H_norm_order))
return Ckl.sum() + W_reg + H_reg
@timemeasure
def fit(self, V):
W, H = self.__init_WH(V)
for i in range(0, self.n_iter):
W = self.__update_W(V, W, H)
W, H = self.normalize_WH(W, H)
H = self.__update_H(V, W, H)
W, H = self.normalize_WH(W, H)
self.err[i] = self.__loss(V, W, H)
print(i+1, self.err[i])
self.W, self.H = W, H
return W, H
def __shift(self, X, n, direction):
if n == 0:
return X
M, N = X.shape
Ret = np.zeros((M,N))
if direction == "right":
Ret[:,n::] = X[:,0:N-n]
elif direction == "left":
Ret[:,0:N-n] = X[:,n:N]
elif direction == "down":
Ret[n::,:] = X[0:M-n,:]
elif direction == "up":
#Ret[0:M-n,:] = X[n:M,:]
Ret = np.r_[X[n:M,:],np.zeros((n,N))]
return Ret
def __convolution(self, W, H, factrize=False):
V = np.zeros((self.n_row, self.n_col))
for p in range(0, self.n_pitches):
for t in range(0, self.n_frames):
Wtmp = self.__shift(W[:,:,t], p, "down")
Htmp = self.__shift(H[:,p,:], t, "right")
V += np.dot(Wtmp, Htmp)
return V
def get_sources(self, W, H):
S = np.zeros((self.n_row, self.n_col, self.n_basis))
for p in range(0, self.n_pitches):
for t in range(0, self.n_frames):
Wtmp = self.__shift(W[:,:,t], p, "down")
Htmp = self.__shift(H[:,p,:], t, "right")
for k in range(0, self.n_basis):
S[:,:,k] += np.outer(Wtmp[:,k], Htmp[k,:])
return S
def __compute_VL(self, V, W, H, eps=np.spacing(1)):
L = self.__convolution(W, H)
VL = np.nan_to_num(V/L)
return VL, L
def normalize_WH(self, W, H, return_2d=False):
W2d = np.reshape(W, (self.n_row, self.n_basis*self.n_frames))
H2d = np.reshape(H, (self.n_basis*self.n_pitches, self.n_col))
# NOTE: hedged reconstruction of a standard NMF rescaling -- each basis is
# scaled to unit norm in W and compensated in H, leaving the product W*H unchanged
norm = np.sqrt(np.sum(W**2, axis=(0, 2))) + self.eps
W = W / norm[np.newaxis, :, np.newaxis]
H = H * norm[:, np.newaxis, np.newaxis]
if return_2d:
return W2d, H2d
return W, H
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import time
import argparse
from scipy.special import softmax
from openvino.runtime import Core
def image_preprocess(img_path, re_shape):
img = cv2.imread(img_path)
img = cv2.resize(
img, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img, [2, 0, 1]) / 255
img = np.expand_dims(img, 0)
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
img -= img_mean
img /= img_std
return img.astype(np.float32)
def draw_box(img, results, class_label, scale_x, scale_y):
label_list = list(
map(lambda x: x.strip(), open(class_label, 'r').readlines()))
for i in range(len(results)):
print(label_list[int(results[i][0])], ':', results[i][1])
bbox = results[i, 2:]
label_id = int(results[i, 0])
score = results[i, 1]
if (score > 0.20):
xmin, ymin, xmax, ymax = [
int(bbox[0] * scale_x), int(bbox[1] * scale_y),
int(bbox[2] * scale_x), int(bbox[3] * scale_y)
]
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
font = cv2.FONT_HERSHEY_SIMPLEX
label_text = label_list[label_id]
cv2.rectangle(img, (xmin, ymin), (xmax, ymin - 60), (0, 255, 0), -1)
cv2.putText(img, "#" + label_text, (xmin, ymin - 10), font, 1,
(255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(img,
str(round(score, 3)), (xmin, ymin - 40), font, 0.8,
(255, 255, 255), 2, cv2.LINE_AA)
return img
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
indexes = np.argsort(scores)
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(
current_box, axis=0), )
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
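# hard_nms keeps the top-scoring box and suppresses boxes that overlap it
# beyond iou_threshold. With two near-identical boxes and one disjoint box,
# two of the three survive. Hedged sketch (coordinates are made up):
def _demo_hard_nms():
    box_scores = np.array([
        [0., 0., 10., 10., 0.9],    # best box, always kept
        [1., 1., 11., 11., 0.8],    # IoU ~0.68 with the best -> suppressed
        [50., 50., 60., 60., 0.7],  # disjoint -> kept
    ])
    return hard_nms(box_scores, iou_threshold=0.5)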
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def area_of(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
class PicoDetPostProcess(object):
"""
Args:
input_shape (int): network input image size
ori_shape (int): ori image shape of before padding
scale_factor (float): scale factor of ori image
enable_mkldnn (bool): whether to open MKLDNN
"""
def __init__(self,
input_shape,
ori_shape,
scale_factor,
strides=[8, 16, 32, 64],
score_threshold=0.4,
nms_threshold=0.5,
nms_top_k=1000,
keep_top_k=100):
self.ori_shape = ori_shape
self.input_shape = input_shape
self.scale_factor = scale_factor
self.strides = strides
self.score_threshold = score_threshold
self.nms_threshold = nms_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
def warp_boxes(self, boxes, ori_shape):
"""Apply transform to boxes
"""
width, height = ori_shape[1], ori_shape[0]
n = len(boxes)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
n * 4, 2) # x1y1, x2y2, x1y2, x2y1
# xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate(
(x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
return xy.astype(np.float32)
else:
return boxes
def __call__(self, scores, raw_boxes):
batch_size = raw_boxes[0].shape[0]
reg_max = int(raw_boxes[0].shape[-1] / 4 - 1)
out_boxes_num = []
out_boxes_list = []
for batch_id in range(batch_size):
# generate centers
decode_boxes = []
select_scores = []
for stride, box_distribute, score in zip(self.strides, raw_boxes,
scores):
box_distribute = box_distribute[batch_id]
score = score[batch_id]
# centers
fm_h = self.input_shape[0] / stride
fm_w = self.input_shape[1] / stride
h_range = np.arange(fm_h)
w_range = np.arange(fm_w)
ww, hh = np.meshgrid(w_range, h_range)
ct_row = (hh.flatten() + 0.5) * stride
ct_col = (ww.flatten() + 0.5) * stride
center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)
# box distribution to distance
reg_range = np.arange(reg_max + 1)
box_distance = box_distribute.reshape((-1, reg_max + 1))
box_distance = softmax(box_distance, axis=1)
box_distance = box_distance * np.expand_dims(reg_range, axis=0)
box_distance = np.sum(box_distance, axis=1).reshape((-1, 4))
box_distance = box_distance * stride
# top K candidate
topk_idx = np.argsort(score.max(axis=1))[::-1]
topk_idx = topk_idx[:self.nms_top_k]
center = center[topk_idx]
score = score[topk_idx]
box_distance = box_distance[topk_idx]
# decode box
decode_box = center + [-1, -1, 1, 1] * box_distance
select_scores.append(score)
decode_boxes.append(decode_box)
# nms
bboxes = np.concatenate(decode_boxes, axis=0)
confidences = np.concatenate(select_scores, axis=0)
##################################
# PROGRAM FOR NUMERICALLY SOLVING SCHRODINGER'S EQUATION
# <NAME>, <NAME>, and <NAME>
##################################
import sys
ver=sys.version_info.major
if ver==2:
from utils2 import *
elif ver==3:
from utils3 import *
else:
print("Python version not recognized. Python 2.5 or greater required.")
import numpy as np
##################################
#FUNCTIONS
##################################
########
# PARTICLE IN AN INFINITE POTENTIAL WELL
########
def infinite_well(steps=2000):
# atomic units
hbar=1.0
m=1.0
# get well width and number of wave function desired
W,n=infinite_well_input()
# divide by two so a well from -W to W is of input width
W=W/2.0
# create x-vector from -W to W
xvec=np.linspace(-W,W,steps)
# get step size
h=xvec[1]-xvec[0]
# create Laplacian via 3 point finite-difference method
Laplacian=(-2.0*np.diag(np.ones(steps))+np.diag(np.ones(steps-1),1)+np.diag(np.ones(steps-1),-1))/h**2  # tridiagonal 3-point stencil
from enum import Enum
from pathlib import Path
import json
import numpy as np
from api.storage import Dataset
###############################################################################
# Classes
###############################################################################
class EvaluationMode(Enum):
"""
Enum for Evaluation mode.
"""
AROUSAL = 0
VALENCE = 1
MEAN = 2
class PresentationMode(Enum):
"""
Enum for Presentation mode.
"""
AL = 0
ML = 1
DS = 2 # dataset
class LearningProfileDescription:
"""
Used when reading data from an .npy file to create a learning profile
with getter functions for different parameters.
Based on Evaluation/Presentation mode some paramters are set accordingly.
"""
def __init__(self, id, profile, eval=None, pres=None):
"""
Init function for LearningProfileDescription.
Args:
id (String): id for the learning profile.
profile (list): list of objects containing the data for the
learning profile read from disk.
eval (EvaluationMode): Enum for Evaluation mode. Defaults to None.
pres (PresentationMode): Enum for Presentation mode.
Defaults to None.
"""
self._id = id
self._batch_size = profile[10]
self._hyper_parameters = profile[11]
# If needed:
self._score_arousal = profile[4]
self._score_valence = profile[5]
self._score_mean = profile[6]
self._MSE_arousal = profile[7]
self._MSE_valence = profile[8]
self._MSE_mean = profile[9]
self._train_dataset_name = profile[0]
self._test_dataset_name = profile[1]
self._al_func_name = profile[2]
self._ml_func_name = profile[3]
# Evaluation mode
self._eval = eval
if self._eval is not None:
self.set_eval_mode(self._eval)
else:
self._score = None
self._MSE = None
# Presentation mode
self._pres = pres
if self._pres is not None:
self.set_pres_mode(self._pres)
else:
self._attr = None
def set_eval_mode(self, eval):
"""
Set the evaluation mode.
Updates the score and MSE parameters for the given evalution.
Args:
eval (EvaluationMode): Enum for evaluation mode.
"""
self._eval = eval
if eval == EvaluationMode.AROUSAL:
self._score = self._score_arousal
self._MSE = self._MSE_arousal
elif eval == EvaluationMode.VALENCE:
self._score = self._score_valence
self._MSE = self._MSE_valence
elif eval == EvaluationMode.MEAN:
self._score = self._score_mean
self._MSE = self._MSE_mean
def set_pres_mode(self, pres):
"""
Set the presentation mode.
Updates the attribute parameter for the given presentation mode.
Args:
pres (PresentationMode): Enum for presentation mode.
"""
self._pres = pres
if pres == PresentationMode.AL:
self._attr = self._al_func_name
elif pres == PresentationMode.ML:
self._attr = self._ml_func_name
elif pres == PresentationMode.DS:
self._attr = self._train_dataset_name
def get_id(self):
return self._id
def get_batch_size(self):
return self._batch_size
def get_score_arousal(self):
return self._score_arousal
def get_score_valence(self):
return self._score_valence
def get_score_mean(self):
return self._score_mean
def get_MSE_arousal(self):
return self._MSE_arousal
def get_MSE_valence(self):
return self._MSE_valence
def get_MSE_mean(self):
return self._MSE_mean
def get_train_dataset_name(self):
return self._train_dataset_name
def get_test_dataset_name(self):
return self._test_dataset_name
def get_al_func_name(self):
return self._al_func_name
def get_ml_func_name(self):
return self._ml_func_name
def get_name(self, align=False, include_batch_size=False):
"""
Returns the concatinated string of the learning profile, containing
training_dataset_name, test_dataset_name, al_func_name, ml_func_name
and an optional batch_size.
Decent alignment can be achieved by setting align to True.
Args:
align (bool): align text. Defaults to False.
include_batch_size (bool): include batch size information.
Defaults to False.
Returns:
String: A concatinated string of the learning profile.
"""
if align:
return f"{self._train_dataset_name : <10} " + \
f"{self._test_dataset_name : <10} " + \
f"{self._al_func_name : <35} " + \
f"{self._ml_func_name : <30}" + \
("" if not include_batch_size else
f"{f'(Batch Size: {self._batch_size})' : >10}")
return self._train_dataset_name + " " + \
self._test_dataset_name + " " + \
self._al_func_name + " " + \
self._ml_func_name + " " + \
("" if not include_batch_size else
f"(Batch Size: {self._batch_size})")
def get_score(self):
if self._eval is not None:
return self._score
else:
raise ValueError("Evaluation mode not set.")
def get_MSE(self):
if self._eval is not None:
return self._MSE
else:
raise ValueError("Evaluation mode not set.")
def get_attr(self):
if self._pres is not None:
return self._attr
else:
raise ValueError("Presentation mode not set.")
def get_hyper_parameters(self):
return self._hyper_parameters
def __str__(self):
return f"lp-{self._id}"
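# The profile list handed to LearningProfileDescription is positional:
# [train_ds, test_ds, al_name, ml_name, score_arousal, score_valence,
#  score_mean, mse_arousal, mse_valence, mse_mean, batch_size, hyper_params].
# Hedged toy construction (every value below is made up):
def _demo_learning_profile():
    profile = ["train_ds", "test_ds", "uncertainty_sampling", "ridge",
               0.71, 0.65, 0.68, 0.12, 0.15, 0.135, 32, {"alpha": 1.0}]
    lp = LearningProfileDescription("0", profile,
                                    eval=EvaluationMode.MEAN,
                                    pres=PresentationMode.ML)
    return lp.get_score(), lp.get_MSE(), lp.get_attr()  # (0.68, 0.135, "ridge")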
class AnnotationStation:
"""
Used for saving label annotations for songs.
Reducing the need for annotating the same song more than once.
Annotations will be saved as a dictionsary in following format
for song ids 1 and 2::
{'1': [[arousal1], [valence1]], '2': [[arousal2], [valence2]]}
"""
def __init__(self, path: Path):
"""
AnnotationStation constructor.
Args:
path (Path): Path to dictionary in json format.
(Note: include `.json` tag.)
Raises:
FileNotFoundError: When path is not correct.
"""
self.path = path
if path.exists():
if path.is_file():
with open(path, "r") as f:
self.annotations = json.loads(f.read())
else:
raise FileNotFoundError(f"{path} not a file!")
else:
self.annotations = dict()
def is_song_id_in_annotations(self, song_id: int):
"""
Checks if `song_id` is already saved in annotations and
thus already annotated.
Args:
song_id (int): The song id to check for.
Returns:
bool: True if song id is in annotations.
"""
return str(song_id) in self.annotations
def add_annotation(self, song_id: int, arousal: np.ndarray,
valence: np.ndarray):
"""
Add annotation of `song_id` to annotations dictionary,
followed by saving it to file using `save_annotations()`.
Args:
song_id (int): The song id to add.
arousal (np.ndarray): Dynamic arousal annotations, as a
column vector.
valence (np.ndarray): Dynamic valence annotations, as a
column vector.
"""
ar_dim = arousal.ndim
va_dim = valence.ndim
if ar_dim == 1 and va_dim == 1:
res = np.array([arousal, valence])
# hedged completion following the class docstring: store the pair as plain
# lists so the dict stays JSON-serializable, then persist it
self.annotations[str(song_id)] = res.tolist()
self.save_annotations()
else:
raise ValueError("arousal and valence must be 1-D column vectors")
# MIT License
#
# Copyright (c) 2016-2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import alib
import vnep_approx
import os
import pickle
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
import logging
logger = logging.getLogger(__name__)
def extract_parameter_range(scenario_parameter_space_dict, key):
if not isinstance(scenario_parameter_space_dict, dict):
return None
for generator_name, value in scenario_parameter_space_dict.items():
if generator_name == key:
return [key], value
if isinstance(value, list):
if len(value) != 1:
continue
value = value[0]
result = extract_parameter_range(value, key)
if result is not None:
path, values = result
return [generator_name, 0] + path, values
elif isinstance(value, dict):
result = extract_parameter_range(value, key)
if result is not None:
path, values = result
return [generator_name] + path, values
return None
def lookup_scenarios_having_specific_values(scenario_parameter_space_dict, path, value):
current_path = path[:]
current_dict = scenario_parameter_space_dict
while len(current_path) > 0:
if isinstance(current_path[0], str):
current_dict = current_dict[current_path[0]]
current_path.pop(0)
elif current_path[0] == 0:
current_path.pop(0)
# print current_dict
return current_dict[value]
def lookup_scenario_parameter_room_dicts_on_path(scenario_parameter_space_dict, path):
current_path = path[:]
current_dict_or_list = scenario_parameter_space_dict
dicts_on_path = []
while len(current_path) > 0:
dicts_on_path.append(current_dict_or_list)
if isinstance(current_path[0], str):
current_dict_or_list = current_dict_or_list[current_path[0]]
current_path.pop(0)
elif isinstance(current_path[0], int):
current_dict_or_list = current_dict_or_list[int(current_path[0])]
current_path.pop(0)
else:
raise RuntimeError("Could not lookup dicts.")
return dicts_on_path
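# extract_parameter_range walks the nested scenario-parameter space and
# returns the path to a key together with its value range. Hedged toy
# sketch (the dictionary layout below is illustrative):
def _demo_extract_parameter_range():
    space = {"scenario_generation": [
        {"requests": {"number_of_requests": [40, 60, 80, 100]}}
    ]}
    path, values = extract_parameter_range(space, "number_of_requests")
    # path -> ['scenario_generation', 0, 'requests', 'number_of_requests']
    return path, values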
def evaluate_baseline_and_randround(dc_seplp_dynvmp,
seplp_dynvmp_algorithm_id,
seplp_dynvmp_execution_config,
dc_randround,
randround_algorithm_id,
randround_execution_config,
exclude_generation_parameters=None,
parameter_filter_keys=None,
show_plot=False,
save_plot=True,
forbidden_scenario_ids=None,
output_path="./",
output_filetype="png",
request_sets=[[40,60],[80,100]]):
""" Main function for evaluation, creating plots and saving them in a specific directory hierarchy.
A large variety of plots is created. For heatmaps, a generic plotter is used while for general
comparison plots (ECDF and scatter) an own class is used. The plots that shall be generated cannot
be controlled at the moment but the respective plotters can be easily adjusted.
:param dc_seplp_dynvmp: unpickled datacontainer of baseline experiments (e.g. MIP)
:param seplp_dynvmp_algorithm_id: algorithm id of the baseline algorithm
:param seplp_dynvmp_execution_config: execution config (numeric) of the baseline algorithm execution
:param dc_randround: unpickled datacontainer of randomized rounding experiments
:param randround_algorithm_id: algorithm id of the randround algorithm
:param randround_execution_config: execution config (numeric) of the randround algorithm execution
:param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation.
These won't show in the plots and will also not be shown on axis labels etc.
:param parameter_filter_keys: name of parameters according to which the results shall be filtered
:param show_plot: Boolean: shall plots be shown
:param save_plot: Boolean: shall the plots be saved
:param forbidden_scenario_ids: list / set of scenario ids that shall not be considered in the evaluation
:param output_path: path to which the results shall be written
:param output_filetype: filetype supported by matplotlib to export figures
:return: None
"""
if forbidden_scenario_ids is None:
forbidden_scenario_ids = set()
if exclude_generation_parameters is not None:
for key, values_to_exclude in exclude_generation_parameters.items():
parameter_filter_path, parameter_values = extract_parameter_range(
dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room, key)
parameter_dicts_baseline = lookup_scenario_parameter_room_dicts_on_path(
dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
parameter_dicts_seplpdynvmp = lookup_scenario_parameter_room_dicts_on_path(
dc_randround.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
for value_to_exclude in values_to_exclude:
if value_to_exclude not in parameter_values:
raise RuntimeError("The value {} is not contained in the list of parameter values {} for key {}".format(
value_to_exclude, parameter_values, key
))
#add respective scenario ids to the set of forbidden scenario ids
forbidden_scenario_ids.update(set(lookup_scenarios_having_specific_values(
dc_seplp_dynvmp.scenario_parameter_container.scenario_parameter_dict, parameter_filter_path, value_to_exclude)))
#remove the respective values from the scenario parameter room such that these are not considered when
#constructing e.g. axes
parameter_dicts_baseline[-1][key] = [value for value in parameter_dicts_baseline[-1][key] if
value not in values_to_exclude]
parameter_dicts_seplpdynvmp[-1][key] = [value for value in parameter_dicts_seplpdynvmp[-1][key] if
value not in values_to_exclude]
sep_lp_dynvmp_data_set = {scenario_index:
dc_seplp_dynvmp.algorithm_scenario_solution_dictionary[seplp_dynvmp_algorithm_id][
scenario_index][seplp_dynvmp_execution_config]
for scenario_index in
list(dc_seplp_dynvmp.algorithm_scenario_solution_dictionary[
seplp_dynvmp_algorithm_id].keys()) if scenario_index not in forbidden_scenario_ids}
randround_data_set = {scenario_index:
dc_randround.algorithm_scenario_solution_dictionary[randround_algorithm_id][
scenario_index][randround_execution_config]
for scenario_index in
list(dc_randround.algorithm_scenario_solution_dictionary[
randround_algorithm_id].keys()) if scenario_index not in forbidden_scenario_ids}
plot_comparison_separation_dynvmp_vs_lp(sep_lp_dynvmp_data_set=sep_lp_dynvmp_data_set,
randround_data_set=randround_data_set,
dc_seplp_dynvmp=dc_seplp_dynvmp,
request_sets=request_sets,
output_path=output_path,
output_filetype=output_filetype)
def plot_comparison_separation_dynvmp_vs_lp(sep_lp_dynvmp_data_set,
randround_data_set,
dc_seplp_dynvmp,
request_sets,
output_path,
output_filetype):
logger.info(sep_lp_dynvmp_data_set)
scenarioparameter_room = dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room
scenario_parameter_dict = dc_seplp_dynvmp.scenario_parameter_container.scenario_parameter_dict
filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(scenarioparameter_room,
"number_of_requests")
logger.info(list_number_of_requests)
fig, ax = plt.subplots(figsize=(5, 3.5))
def get_color(value):
return plt.cm.inferno(value)
colors = [get_color(0.5),get_color(0.0), get_color(0.75), get_color(0.25)] #get_color(0.7),
#colors = [get_color(0.75), get_color(0.55), get_color(0.35), get_color(0.0)]
linestyles = ['-', ':']
with_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[0], label=r"incl. $\mathcal{T}_r$ comp.", linewidth=2)
wo_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[1], label=r"excl. $\mathcal{T}_r$ comp.", linewidth=2.75)
second_legend_handlers = []
max_observed_value = 0
for request_number_index, number_of_requests_ in enumerate(request_sets):
scenario_ids_to_consider = set()
for number_of_requests in number_of_requests_:
            # collect the scenario ids that use this number of requests
scenario_ids_of_requests = lookup_scenarios_having_specific_values(scenario_parameter_dict, filter_path_number_of_requests, number_of_requests)
scenario_ids_to_consider = scenario_ids_to_consider.union(scenario_ids_of_requests)
speedups_real = []
speedups_wotd = [] # without tree decomposition
relative_speedup_sep_lp_wo_td = []
for scenario_id in scenario_ids_to_consider:
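            # LP runtime of separation-LP/DynVMP = preprocessing + optimization time;
            # subtracting mean * count of the tree-decomposition timer yields the
            # runtime excluding tree-decomposition computation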
seplp_with_decomposition = sep_lp_dynvmp_data_set[scenario_id].lp_time_preprocess + sep_lp_dynvmp_data_set[scenario_id].lp_time_optimization
seplp_without_decomposition = seplp_with_decomposition - (sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.mean * sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.value_count)
randround_lp_runtime = randround_data_set[scenario_id].meta_data.time_preprocessing + \
randround_data_set[scenario_id].meta_data.time_optimization + \
randround_data_set[scenario_id].meta_data.time_postprocessing
relative_speedup_sep_lp_wo_td.append(seplp_with_decomposition / seplp_without_decomposition)
speedups_real.append(randround_lp_runtime / seplp_with_decomposition)
speedups_wotd.append(randround_lp_runtime / seplp_without_decomposition)
speedup_real = sorted(speedups_real)
speedup_wotd = sorted(speedups_wotd)
logger.info("Relative when excluding tree decomposition computation {} requests:\n"
"mean: {}\n".format(number_of_requests,
np.mean(relative_speedup_sep_lp_wo_td)))
logger.info("Relative speedup compared to cactus LP for {} requests:\n"
"with tree decomposition (mean): {}\n"
"without tree decomposition (mean): {}".format(number_of_requests,
np.mean(speedups_real),
np.mean(speedups_wotd)))
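        # ECDF construction: pad the sorted speedups with (0.5, 0.0) on the left and
        # (max_observed_value, 1.0) on the right so each curve spans the plotted x-range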
max_observed_value = np.maximum(max_observed_value, speedup_real[-1])
yvals = np.arange(1, len(speedup_real) + 1) / float(len(speedup_real))
yvals = np.insert(yvals, 0, 0.0, axis=0)
yvals = np.append(yvals, [1.0])
speedup_real.append(max_observed_value)
speedup_real.insert(0, 0.5)
ax.semilogx(speedup_real, yvals, color=colors[request_number_index], linestyle=linestyles[0],
linewidth=2.75, alpha=1)
max_observed_value = np.maximum(max_observed_value, speedup_wotd[-1])
yvals = np.arange(1, len(speedup_wotd) + 1) / float(len(speedup_wotd))
yvals = np.insert(yvals, 0, 0.0, axis=0)
yvals = np.append(yvals, [1.0])
speedup_wotd.append(max_observed_value)
speedup_wotd.insert(0, 0.5)
ax.semilogx(speedup_wotd, yvals, color=colors[request_number_index], linestyle=linestyles[1],
linewidth=2.75, alpha=1)
if len(number_of_requests_) == 2:
second_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors[request_number_index], alpha=1, linestyle="-",
label=("{} & {}".format(number_of_requests_[0], number_of_requests_[1])).ljust(3), linewidth=2.5))
else:
second_legend_handlers.append(
matplotlib.lines.Line2D([], [], color=colors[request_number_index], alpha=1, linestyle="-",
label=("{}".format(number_of_requests_[0])).ljust(
3), linewidth=2.5))
first_legend = plt.legend(handles=[with_td, wo_td], loc=4, fontsize=14, title="", handletextpad=.35,
borderaxespad=0.1, borderpad=0.2, handlelength=1)
first_legend.get_frame().set_alpha(1.0)
first_legend.get_frame().set_facecolor("#FFFFFF")
plt.setp(first_legend.get_title(), fontsize=15)
plt.gca().add_artist(first_legend)
# ax.tick_params(labelright=True)
# print second_legend_handlers
second_legend = plt.legend(handles=second_legend_handlers, loc=2, fontsize=14, title="#requests", handletextpad=.35,
borderaxespad=0.175, borderpad=0.2, handlelength=2)
#plt.gca().add_artist(second_legend)
plt.setp(second_legend.get_title(), fontsize=15)
second_legend.get_frame().set_alpha(1.0)
second_legend.get_frame().set_facecolor("#FFFFFF")
# first_legend = plt.legend(title="Bound($\mathrm{MIP}_{\mathrm{MCF}})$", handles=root_legend_handlers, loc=(0.225,0.0125), fontsize=14, handletextpad=0.35, borderaxespad=0.175, borderpad=0.2)
# plt.setp(first_legend.get_title(), fontsize='15')
# plt.gca().add_artist(first_legend)
# plt.setp("TITLE", fontsize='15')
ax.set_title("Cactus LP Runtime Comparison", fontsize=17)
ax.set_xlabel(r"Speedup: time($\mathsf{LP}_{\mathsf{Cactus}}$) / time($\mathsf{LP}_{\mathsf{DynVMP}}$)",
fontsize=16)
ax.set_ylabel("ECDF", fontsize=16)
ax.set_xlim(0.4, max_observed_value * 1.15)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(14)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax.set_xticks([0.5, 1, 5, 20, 60, ], minor=False)
ax.set_xticks([2, 3, 4, 10, 30, 40], minor=True)
ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], minor=False)
ax.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9], minor=True)
# ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xticklabels([], minor=True)
ax.grid(True, which="both", linestyle=":", color='k', alpha=0.7, linewidth=0.33)
plt.tight_layout()
file_to_write = os.path.join(output_path, "ecdf_speedup_cactus_lp_vs_separation_dynvmp." + output_filetype)
plt.savefig(file_to_write)
def plot_comparison_separation_dynvmp_vs_lp_orig(sep_lp_dynvmp_data_set,
randround_data_set,
dc_seplp_dynvmp):
logger.info(sep_lp_dynvmp_data_set)
scenarioparameter_room = dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room
scenario_parameter_dict = dc_seplp_dynvmp.scenario_parameter_container.scenario_parameter_dict
filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(scenarioparameter_room,
"number_of_requests")
logger.info(list_number_of_requests)
    fig, ax = plt.subplots(figsize=(5, 3.5))
def get_color(value):
return plt.cm.inferno(value)
colors = [get_color(0.75), get_color(0.55),get_color(0.35),get_color(0.0)]
linestyles = ['-', ':']
with_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[0], label=r"incl. $\mathcal{T}_r$ comp.", linewidth=2)
wo_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[1], label=r"excl. $\mathcal{T}_r$ comp.", linewidth=2.75)
second_legend_handlers = []
max_observed_value = 0
for request_number_index, number_of_requests in enumerate(list_number_of_requests):
        # collect the scenario ids that use this number of requests
scenario_ids_of_requests = lookup_scenarios_having_specific_values(scenario_parameter_dict, filter_path_number_of_requests, number_of_requests)
speedups_real = []
speedups_wotd = [] # without tree decomposition
relative_speedup_sep_lp_wo_td = []
for scenario_id in scenario_ids_of_requests:
seplp_with_decomposition = sep_lp_dynvmp_data_set[scenario_id].lp_time_preprocess + sep_lp_dynvmp_data_set[scenario_id].lp_time_optimization
seplp_without_decomposition = seplp_with_decomposition - (sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.mean * sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.value_count)
randround_lp_runtime = randround_data_set[scenario_id].meta_data.time_preprocessing + \
randround_data_set[scenario_id].meta_data.time_optimization + \
randround_data_set[scenario_id].meta_data.time_postprocessing
relative_speedup_sep_lp_wo_td.append(seplp_with_decomposition / seplp_without_decomposition)
speedups_real.append(randround_lp_runtime / seplp_with_decomposition)
speedups_wotd.append(randround_lp_runtime / seplp_without_decomposition)
speedup_real = sorted(speedups_real)
speedup_wotd = sorted(speedups_wotd)
logger.info("Relative when excluding tree decomposition computation {} requests:\n"
"mean: {}\n".format(number_of_requests,
np.mean(relative_speedup_sep_lp_wo_td)))
logger.info("Relative speedup compared to cactus LP for {} requests:\n"
"with tree decomposition (mean): {}\n"
"without tree decomposition (mean): {}".format(number_of_requests,
np.mean(speedups_real),
np.mean(speedups_wotd)))
max_observed_value = np.maximum(max_observed_value, speedup_real[-1])
yvals = np.arange(1, len(speedup_real) + 1) / float(len(speedup_real))
yvals = np.insert(yvals, 0, 0.0, axis=0)
yvals = np.append(yvals, [1.0])
speedup_real.append(max_observed_value)
speedup_real.insert(0, 0.5)
ax.semilogx(speedup_real, yvals, color=colors[request_number_index], linestyle=linestyles[0],
linewidth=2.75, alpha=1)
max_observed_value = np.maximum(max_observed_value, speedup_wotd[-1])
yvals = np.arange(1, len(speedup_wotd) + 1) / float(len(speedup_wotd))
yvals = np.insert(yvals, 0, 0.0, axis=0)
        yvals = np.append(yvals, [1.0])
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by <NAME>
# Python translation by <NAME>, with <NAME> and <NAME>
import proper
import numpy as np
def prop_get_amplitude(wf):
"""Function returns amplitude of current wavefront
Parameters
----------
wf : obj
Wavefront class object
Returns
-------
amplitude : numpy ndarray
A 2D image corresponding to the amplitude of the current wavefront
"""
    return proper.prop_shift_center(np.abs(wf.wfarr))
import sys
import os
import numpy as np
import openpnm as op
import porespy as ps
import re
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../'))
from computations.calculate_flows import calculate_flows
from max_radius.calculate_max_radius import calculate_max_radius
# This function flips the void/rock phase labels if needed
def flip_values(data, val1, val2):
    data = np.where(data == val1, -999.25, data)
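    # assumed completion (source truncated here): finish the val1/val2 swap via the sentinel
    data = np.where(data == val2, val1, data)
    data = np.where(data == -999.25, val2, data)
    return data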
import cv2
import json
import numpy as np
from cv2 import aruco
import matplotlib.pyplot as plt
def extract_frame(vid_name, frame, frame_name):
vid = cv2.VideoCapture(vid_name)
vid.set(cv2.CAP_PROP_POS_FRAMES, frame)
ret, frame = vid.read()
if ret:
cv2.imwrite(frame_name, frame)
else:
print("Failed reading frame %d in %s" % (frame, vid_name))
def axis_equal_3d(ax):
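    # matplotlib 3D axes lack a native 'equal' aspect mode; instead, expand every
    # axis around its center to the same half-range r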
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
R = r * 3 / 4 if dim == 'z' else r
getattr(ax, 'set_{}lim'.format(dim))(ctr - R, ctr + R)
def line(ax, p1, p2, *args, **kwargs):
ax.plot([p1[0], p2[0]], [p1[2], p2[2]], [-p1[1], -p2[1]], *args, **kwargs)
def basis(ax, T, R, *args, length=1, **kwargs):
line(ax, T, T + length * R[:, 0], "r")
line(ax, T, T + length * R[:, 1], "g")
line(ax, T, T + length * R[:, 2], "b")
def board(ax, T, R, *args, label="", **kwargs):
line(ax, T, T + 375 * R[:, 0], "orange", linestyle="--", label=label)
line(ax, T, T + 270 * R[:, 1], "orange", linestyle="--")
line(ax, T + 375 * R[:, 0], T + 375 * R[:, 0] + 270 * R[:, 1], "orange", linestyle="--")
line(ax, T + 270 * R[:, 1], T + 375 * R[:, 0] + 270 * R[:, 1], "orange", linestyle="--")
basis(ax, T, R, length=15)
def sensor(ax, *args, width=146, height=222, depth=56, up=12, behind=13, radius=54, from_top=74, **kwargs):
ex, ey, ez = np.array([width/2, 0, 0]), np.array([0, height/2, 0]), np.array([0, 0, depth/2])
    o = np.array([0, -up, -behind])
# coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain BERT"""
from comet_ml import Experiment
#from apex import amp
import os
import random
import numpy as np
import psutil
import torch
from olfmlm.arguments import get_args
from olfmlm.configure_data import configure_data
from olfmlm.learning_rates import AnnealingLR
from olfmlm.model import BertModel
from olfmlm.model import get_params_for_weight_decay_optimization
from olfmlm.model import DistributedDataParallel as DDP
from olfmlm.optim import Adam
from olfmlm.utils import Timers
from olfmlm.utils import save_checkpoint
from olfmlm.utils import load_checkpoint
batch_step = 0
def get_model(tokenizer, args):
"""Build the model."""
print('building BERT model ...')
model = BertModel(tokenizer, args)
print(' > number of parameters: {}'.format(
sum([p.nelement() for p in model.parameters()])), flush=True)
# GPU allocation.
model.cuda(torch.cuda.current_device())
# Wrap model for distributed training.
if args.world_size > 1:
model = DDP(model)
return model
def get_optimizer(model, args):
"""Set up the optimizer."""
# Build parameter groups (weight decay and non-decay).
while isinstance(model, DDP):
model = model.module
param_groups = model.get_params()
# Use Adam.
optimizer = Adam(param_groups,
lr=args.lr, weight_decay=args.weight_decay)
return optimizer
def get_learning_rate_scheduler(optimizer, args):
"""Build the learning rate scheduler."""
# Add linear learning rate scheduler.
if args.lr_decay_iters is not None:
num_iters = args.lr_decay_iters
else:
num_iters = args.train_tokens * args.epochs
init_step = -1
warmup_iter = args.warmup * num_iters
lr_scheduler = AnnealingLR(optimizer,
start_lr=args.lr,
warmup_iter=warmup_iter,
num_iters=num_iters,
decay_style=args.lr_decay_style,
last_iter=init_step)
return lr_scheduler
def setup_model_and_optimizer(args, tokenizer):
"""Setup model and optimizer."""
model = get_model(tokenizer, args)
optimizer = get_optimizer(model, args)
lr_scheduler = get_learning_rate_scheduler(optimizer, args)
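    # unreduced (per-element) losses so that task-specific masks
    # (loss_mask, tgs_mask) can be applied before averaging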
criterion_cls = torch.nn.CrossEntropyLoss(reduce=False, ignore_index=-1)
criterion_reg = torch.nn.MSELoss(reduce=False)
criterion = (criterion_cls, criterion_reg)
if args.load is not None:
args.epoch = load_checkpoint(model, optimizer, lr_scheduler, args)
args.resume_dataloader = True
return model, optimizer, lr_scheduler, criterion
def get_batch(data):
""" Get a batch of data from the data loader, which automatically batches the individual examples
Concatenates necessary data (lm_labels, loss_mask, tgs_mask), which is required for FS/QT variant tasks
Puts data into tensors, and places them onto CUDA
"""
# TODO Add trigram mask
aux_labels = {}
for mode, label in data['aux_labels'].items():
if label.shape[1] == 2:
label = torch.cat([label[:, 0], label[:, 1]])
else:
label = label.squeeze()
aux_labels[mode] = torch.autograd.Variable(label.long()).cuda()
num_sentences = data['n']
num_tokens = torch.tensor(sum(data['num_tokens']).item()).long().cuda()
tokens = []
types = []
tasks = []
loss_mask = []
tgs_mask = []
lm_labels = []
att_mask = []
for i in range(min(num_sentences)):
suffix = "_" + str(i)
tokens.append(torch.autograd.Variable(data['text' + suffix].long()).cuda())
types.append(torch.autograd.Variable(data['types' + suffix].long()).cuda())
tasks.append(torch.autograd.Variable(data['task' + suffix].long()).cuda())
att_mask.append(1 - torch.autograd.Variable(data['pad_mask' + suffix].byte()).cuda())
lm_labels.append((data['mask_labels' + suffix]).long())
loss_mask.append((data['mask' + suffix]).float())
tgs_mask.append((data['tgs_mask' + suffix]).float())
lm_labels = torch.autograd.Variable(torch.cat(lm_labels, dim=0).long()).cuda()
loss_mask = torch.autograd.Variable(torch.cat(loss_mask, dim=0).float()).cuda()
tgs_mask = torch.autograd.Variable(torch.cat(tgs_mask, dim=0).float()).cuda()
return (tokens, types, tasks, aux_labels, loss_mask, tgs_mask, lm_labels, att_mask, num_tokens)
def forward_step(data, model, criterion, modes, args):
"""Forward step."""
criterion_cls, criterion_reg = criterion
# Get the batch.
batch = get_batch(data)
tokens, types, tasks, aux_labels, loss_mask, tgs_mask, lm_labels, att_mask, num_tokens = batch
# Create self-supervised labels which required batch size
if "rg" in modes:
aux_labels['rg'] = torch.autograd.Variable(torch.arange(tokens[0].shape[0]).long()).cuda()
if "fs" in modes:
aux_labels['fs'] = torch.autograd.Variable(torch.ones(tokens[0].shape[0] * 2 * args.seq_length).long()).cuda()
# Forward model.
scores = model(modes, tokens, types, tasks, att_mask, checkpoint_activations=args.checkpoint_activations)
assert sorted(list(scores.keys())) == sorted(modes)
# Calculate losses based on required criterion
losses = {}
for mode, score in scores.items():
if mode in ["mlm", "sbo"]:
mlm_loss = criterion_cls(score.view(-1, args.data_size).contiguous().float(),
lm_labels.view(-1).contiguous())
loss_mask = loss_mask.view(-1).contiguous()
losses[mode] = torch.sum(mlm_loss * loss_mask.view(-1).float()) / loss_mask.sum()
elif mode == "tgs":
tgs_loss = criterion_cls(score.view(-1, 6).contiguous().float(),
aux_labels[mode].view(-1).contiguous())
tgs_loss = tgs_loss.view(-1).contiguous()
losses[mode] = torch.sum(tgs_loss * tgs_mask.view(-1).float() / tgs_mask.sum())
elif mode in ["fs", "wlen", "tf", "tf_idf"]: # use regression
losses[mode] = criterion_reg(score.view(-1).contiguous().float(),
aux_labels[mode].view(-1).contiguous().float()).mean()
else:
score = score.view(-1, 2) if mode in ["tc", "cap"] else score
losses[mode] = criterion_cls(score.contiguous().float(),
aux_labels[mode].view(-1).contiguous()).mean()
#print(losses)
return losses, num_tokens
def backward_step(optimizer, model, losses, num_tokens, args):
"""Backward step."""
# Backward pass.
#optimizer.zero_grad()
# For testing purposes, should always be False
if args.no_aux:
total_loss = losses['mlm']
else:
total_loss = sum(losses.values())
    total_loss /= 2  # halved because gradients accumulate over two batches (see train_step)
total_loss.backward()
# Reduce across processes.
losses_reduced = losses
if args.world_size > 1:
losses_reduced = [[k,v] for k,v in losses_reduced.items()]
reduced_losses = torch.cat([x[1].view(1) for x in losses_reduced])
torch.distributed.all_reduce(reduced_losses.data)
torch.distributed.all_reduce(num_tokens)
reduced_losses.data = reduced_losses.data / args.world_size
model.allreduce_params(reduce_after=False,
fp32_allreduce=False)#args.fp32_allreduce)
losses_reduced = {losses_reduced[i][0]: reduced_losses[i] for i in range(len(losses_reduced))}
# Clipping gradients helps prevent the exploding gradient.
if args.clip_grad > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
return losses_reduced, num_tokens
def train_step(input_data, model, criterion, optimizer, lr_scheduler, modes, args):
"""Single training step."""
# Forward model for one step.
losses, num_tokens = forward_step(input_data, model, criterion, modes, args)
# Calculate gradients, reduce across processes, and clip.
losses_reduced, num_tokens = backward_step(optimizer, model, losses, num_tokens, args)
# Update parameters.
global batch_step
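    # gradients are accumulated over two consecutive batches; the optimizer
    # steps only on every second call (hence total_loss /= 2 in backward_step)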
    batch_step += 1
    if batch_step % 2 == 0:
optimizer.step()
optimizer.zero_grad()
#optimizer.step()
return losses_reduced, num_tokens
def get_stage_info(total_tokens, num_tasks):
"""
Get number of tokens for each task during each stage. Based on ERNIE 2.0's continual multi-task learning
Number of stages is equal to the number of tasks (each stage is larger than the previous one)
:param total_tokens: total number of tokens to train on
:param num_tasks: number of tasks
:return: Number of tokens for each task at each stage
"""
tokens_per_task = total_tokens / num_tasks
tokens_subunit = tokens_per_task / (num_tasks + 1)
tokens_per_task_per_stage = []
for i in range(num_tasks):
stage_tokens = []
for j in range(num_tasks):
if i < j:
stage_tokens.append(0)
elif i > j:
stage_tokens.append(tokens_subunit)
else:
stage_tokens.append(tokens_subunit * (i + 2))
tokens_per_task_per_stage.append(stage_tokens)
return tokens_per_task_per_stage
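# Illustration (assumed, not in the original): get_stage_info(12, 2) returns
# [[4.0, 0], [2.0, 6.0]] -- stage 0 trains only the first task, stage 1 trains
# both, with the newest task receiving the largest share of its stage.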
def set_up_stages(args):
"""
Set up stage information and functions to use for ERNIE 2.0's continual multi-task learning
Closure that returns a function that will return next stages token requirements as requested
:param args: arguments
:return: a function that will return next stages token requirements as requested
"""
assert not args.incremental
total_tokens = args.epochs * args.train_tokens
modes = args.modes.split(',')
if args.always_mlm:
modes = modes[1:]
stage_splits = get_stage_info(total_tokens, len(modes))
stage_idx = 0
def next_stage():
nonlocal stage_idx
if stage_idx >= len(stage_splits):
print("Finished all training, shouldn't reach this unless it's the very final iteration")
return {k: float(total_tokens) for k in modes}
assert len(modes) == len(stage_splits[stage_idx])
current_stage = {k: v for k, v in zip(modes, stage_splits[stage_idx])}
print("Starting stage {} of {}, with task distribution: ".format(stage_idx, len(stage_splits)))
print(current_stage)
stage_idx += 1
return current_stage
return next_stage
def get_mode_from_stage(current_stage, args):
"""
Get the mode to use given the current stage
:param current_stage: number of tokens left for each task for this stage
:param args: arguments
:return: selected mode
"""
modes = args.modes.split(',')
if args.always_mlm:
modes = modes[1:]
p = np.array([current_stage[m] for m in modes])
    p /= np.sum(p)
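    # assumed completion (source truncated here): sample a task mode in
    # proportion to its remaining token budget
    return np.random.choice(modes, p=p)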
import numpy as np
from copy import deepcopy
def sigmoid(x):
return 1/(1 + np.exp(-x))
def backward_tanh(x):
return 1 - x*x
def tanh(x):
return np.tanh(x)
def backward_sigmoid(x):
return x*(1 - x)
def cross_entropy_loss(probs, target_index):
'''
Computes cross-entropy loss
Arguments:
probs, np array, shape is either (N) or (batch_size, N) -
probabilities for every class
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss: single value
'''
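    # np.choose picks, per sample, the probability assigned to the true class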
probs_left = np.choose(target_index, probs.reshape(probs.shape[0], probs.shape[1]).T)
return np.sum(-np.log(probs_left))
def softmax(predictions):
'''
Computes probabilities from scores
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
Returns:
probs, np array of the same shape as predictions -
probability for every class, 0..1
'''
pred = predictions.copy()
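    # subtract each row's maximum before exponentiating: this avoids overflow
    # in exp() without changing the resulting probabilities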
pred_temp = np.swapaxes(pred, 0, 1)
pred = np.swapaxes(pred_temp - np.max(pred, axis=1), 1, 0)
exps = np.exp(pred)
downs = np.sum(exps, axis=1)
probs = exps/downs[:, None]
return probs
def softmax_with_cross_entropy(preds, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (N, batch_size) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
dprediction, np array same shape as predictions - gradient of predictions by loss value
"""
probs = softmax(preds)
loss = cross_entropy_loss(probs, target_index)
d_out = np.copy(probs)
for idx, row in enumerate(d_out):
row[target_index[idx]] -= 1
return loss, d_out
class Param:
'''
Trainable parameter of the model
Captures both parameter value and the gradient
'''
def __init__(self, value):
self.value = value
self.grad = np.zeros_like(value)
class Seq2SeqLSTM:
def __init__(self, hid_layer_size, num_dict, w_std):
self.hid_layer_size = hid_layer_size
self.num_dict = num_dict
conc_size = hid_layer_size + num_dict
self.W_f = Param(w_std * np.random.randn(hid_layer_size, conc_size))
self.b_f = Param(np.zeros((hid_layer_size, 1)))
self.W_i = Param(w_std * np.random.randn(hid_layer_size, conc_size))
self.b_i = Param(np.zeros((hid_layer_size, 1)))
self.W_c = Param(w_std * np.random.randn(hid_layer_size, conc_size))
self.b_c = Param(np.zeros((hid_layer_size, 1)))
self.W_o = Param(w_std * np.random.randn(hid_layer_size, conc_size))
self.b_o = Param(np.zeros((hid_layer_size, 1)))
self.W_y = Param(w_std * np.random.randn(num_dict, hid_layer_size))
self.b_y = Param(np.zeros((num_dict, 1)))
self.h = Param(np.zeros((hid_layer_size, 1)))
self.C = Param(np.zeros((hid_layer_size, 1)))
self.ft = Param(np.zeros((hid_layer_size, 1)))
self.it = Param(np.zeros((hid_layer_size, 1)))
self.C_hat = Param(np.zeros((hid_layer_size, 1)))
self.out = Param(np.zeros((hid_layer_size, 1)))
self.cache = {}
def params(self):
return {'W_f': self.W_f, 'W_i': self.W_i,
'W_c': self.W_c, 'W_o': self.W_o, 'W_y': self.W_y, 'b_f': self.b_f,
'b_i': self.b_i, 'b_c': self.b_c, 'b_o': self.b_o, 'b_y': self.b_y,
'h': self.h, 'C': self.C, 'ft': self.ft, 'it': self.it,
'C_hat': self.C_hat, 'out': self.out}
def forward(self, X_train):
outputs = []
for idx, x in enumerate(X_train):
x_one_hot = np.zeros(self.num_dict)
x_one_hot[x] = 1
x_one_hot = x_one_hot.reshape(1, -1)
X = Param(np.row_stack((self.h.value, x_one_hot.T)))
if -1 not in self.cache:
self.cache[-1] = (X, self.ft, self.it, self.C_hat, self.out, self.h, self.C)
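            # standard LSTM gates: forget (ft), input (it), candidate cell state
            # (C_hat), cell update, and output gate (out)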
self.ft.value = sigmoid(np.dot(self.W_f.value, X.value) + self.b_f.value)
self.it.value = sigmoid(np.dot(self.W_i.value, X.value) + self.b_i.value)
self.C_hat.value = tanh(np.dot(self.W_c.value, X.value) + self.b_c.value)
self.C.value = self.ft.value*self.C.value + self.it.value*self.C_hat.value
            self.out.value = sigmoid(np.dot(self.W_o.value, X.value) + self.b_o.value)
from __future__ import absolute_import, print_function
import numpy as npy
from PyDSTool import Events, Variable, Pointset, Trajectory
from PyDSTool.common import args, metric, metric_L2, metric_weighted_L2, \
metric_float, remain, fit_quadratic, fit_exponential, fit_diff_of_exp, \
smooth_pts, nearest_2n_indices, make_poly_interpolated_curve, simple_bisection
from PyDSTool.Trajectory import numeric_to_traj
from PyDSTool.ModelContext import *
from PyDSTool.Toolbox.data_analysis import butter, filtfilt, rectify
from PyDSTool.errors import PyDSTool_KeyError
import copy
# Test this on a single spike with global max at spike and minima at endpoints
# Test this on a mexican hat type spike with global min and max at spike peak and trough
# Test this on monotonic data for worst case scenario!! Should return None for max and min
# Also test on noisy monotonic data
# Return value of Nones to a feature evaluator should suggest to it to change window size for defining pts
def find_internal_extrema(pts, noise_tol=0):
"""
Find an interior (local) maximum and minimum values of a 1D pointset, away from the endpoints.
Returns a dictionary mapping 'local_max' -> (index_max, xmax), 'local_min' -> (index_min, xmin),
whose values are None if the pointset is monotonic or is close enough so that the global extrema
are at the endpoints.
Use noise_tol > 0 to avoid getting a local extremum right next to an endpoint because of noise.
Also returned in the dictionary for reference:
'first' -> (0, <start_endpoint_value>), 'last' -> (last_index, <last_endpoint_value>),
'global_max' -> (index, value), 'global_min' -> (index, value)
Assumes there is only one interior (max, min) pair in pts, otherwise will return an arbitrary choice
from multiple maxima and minima."""
assert pts.dimension == 1
# convert all singleton points to floats with [0] selection
x0 = pts[0][0]
x1 = pts[-1][0]
# need last_ix explicitly for index test below
last_ix = len(pts)-1
end_ixs = (0, last_ix)
max_val_ix = npy.argmax(pts)
min_val_ix = npy.argmin(pts)
glob_xmax = pts[max_val_ix][0]
glob_xmin = pts[min_val_ix][0]
no_local_extrema = {'local_max': (None, None), 'local_min': (None, None),
'first': (0, x0), 'last': (last_ix, x1),
'global_max': (max_val_ix, glob_xmax),
'global_min': (min_val_ix, glob_xmin)
}
max_at_end = max_val_ix in end_ixs
min_at_end = min_val_ix in end_ixs
if max_at_end:
if min_at_end:
# No detectable turning points present (this is criterion for ignoring noisy data)
return no_local_extrema
else:
# interior minimum found
index_min = min_val_ix
xmin = pts[index_min]
# find associated interior local maximum
max_val_ix1 = npy.argmax(pts[:min_val_ix])
max_val_ix2 = npy.argmax(pts[min_val_ix:])+min_val_ix
if max_val_ix1 in end_ixs:
if max_val_ix2 in end_ixs:
index_max = None
xmax = None
else:
index_max = max_val_ix2
xmax = pts[index_max][0]
else:
# assumes only one local max / min pair in interior!
index_max = max_val_ix1
xmax = pts[index_max][0]
else:
# interior maximum found
index_max = max_val_ix
xmax = pts[index_max][0]
# find associated interior local minimum
min_val_ix1 = npy.argmin(pts[:max_val_ix])
xmin1 = pts[min_val_ix1][0]
min_val_ix2 = npy.argmin(pts[max_val_ix:])+max_val_ix
xmin2 = pts[min_val_ix2][0]
if min_val_ix1 in end_ixs or abs(xmin1-x0)<noise_tol or abs(xmin1-x1)<noise_tol:
if min_val_ix2 in end_ixs or abs(xmin1-x0)<noise_tol or abs(xmin1-x1)<noise_tol:
index_min = None
xmin = None
else:
index_min = min_val_ix2
xmin = xmin2
else:
# assumes only one local max / min pair in interior!
index_min = min_val_ix1
xmin = xmin1
return {'local_max': (index_max, xmax), 'local_min': (index_min, xmin),
'first': (0, x0), 'last': (last_ix, x1),
'global_max': (max_val_ix, glob_xmax),
'global_min': (min_val_ix, glob_xmin)}
class get_spike_model(ql_feature_leaf):
"""Qualitative test for presence of spike in model trajectory data
using events to identify spike times. Also records salient spike
information for quantitative comparisons later."""
def evaluate(self, traj):
# function of traj, not target
pts = traj.sample(coords=[self.super_pars.burst_coord],
tlo=self.pars.tlo,
thi=self.pars.tlo+self.pars.width_tol)
loc_extrema = find_internal_extrema(pts)
if self.pars.verbose_level > 0:
print(loc_extrema)
max_val_ix, xmax = loc_extrema['local_max']
global_max_val_ix, global_xmax = loc_extrema['global_max']
min_val_ix, xmin = loc_extrema['local_min']
global_min_val_ix, global_xmin = loc_extrema['global_min']
# could split these tests into 3 further sub-features but we'll skip that here for efficiency
if xmax is None:
self.results.ixmax = None
self.results.tmax = None
test1 = test2 = test3 = False
else:
test1 = max_val_ix not in (loc_extrema['first'][0], loc_extrema['last'][0])
test2 = npy.linalg.norm(global_xmin-xmax) > self.pars.height_tol
try:
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
except:
# fails if xmin is None, i.e. no interior minimum
# allow no local minimum present, in which case use the other endpoint for test
# ... we don't know which is the one alread tested in test2, so test both ends again,
# knowing that they are both lower than the interior maximum found in this case
xmin = max([global_xmin, loc_extrema['last'][1], loc_extrema['first'][1]])
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
self.results.ixmax = max_val_ix
self.results.tmax = pts.indepvararray[max_val_ix]
self.results.spike_pts = pts
return test1 and test2 and test3
def finish(self, traj):
self.results.spike_time = self.results.tmax
self.results.spike_val = self.results.spike_pts[self.results.ixmax][self.super_pars.burst_coord]
class get_spike_data(ql_feature_leaf):
"""Qualitative test for presence of spike in noisy data. Also records salient spike information
for quantitative comparisons later.
Criteria: ensure a maximum occurs, and that this is away from endpoints of traj
"Uniqueness" of this maximum can only be determined for noisy data using a height
tolerance.
    Assumes spikes will never bunch up too much so that more than one spike occurs in the
spacing_tol window.
Finds maximum position using a quadratic fit.
"""
def _local_init(self):
# avoids recreating this object for every test
self.quadratic = fit_quadratic(verbose=self.pars.verbose_level>0)
def evaluate(self, traj):
# function of traj, not target
event_args = {'name': 'spike_thresh',
'eventtol': self.pars.eventtol,
'eventdelay': self.pars.eventtol*.1,
'starttime': 0,
'active': True}
if 'coord' not in self.pars:
self.pars.coord = self.super_pars.burst_coord
# update thi each time b/c tlo will be different
self.pars.thi = self.pars.tlo+self.pars.width_tol
self.pars.ev = Events.makePythonStateZeroCrossEvent(self.pars.coord,
"thresh", 0,
event_args, traj.variables[self.pars.coord])
pts = traj.sample(coords=[self.pars.coord], tlo=self.pars.tlo,
thi=self.pars.thi)
if pts.indepvararray[-1] < self.pars.thi:
self.pars.thi = pts.indepvararray[-1]
loc_extrema = find_internal_extrema(pts, self.pars.noise_tol)
if self.pars.verbose_level > 0:
print(loc_extrema)
# from PyDSTool import plot, show
## plot spike and quadratic fit
#plot(pts.indepvararray, pts[self.super_pars.burst_coord], 'go-')
#show()
max_val_ix, xmax = loc_extrema['local_max']
global_max_val_ix, global_xmax = loc_extrema['global_max']
min_val_ix, xmin = loc_extrema['local_min']
global_min_val_ix, global_xmin = loc_extrema['global_min']
# could split these tests into 3 further sub-features but we'll skip that here for efficiency
test1 = max_val_ix not in (loc_extrema['first'][0], loc_extrema['last'][0])
test2 = npy.linalg.norm(global_xmin-xmax) > self.pars.height_tol
try:
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
except:
# fails if xmin is None, i.e. no interior minimum
# allow no local minimum present, in which case use the other endpoint for test
# ... we don't know which is the one already tested in test2, so test both ends again,
# knowing that they are both lower than the interior maximum found in this case
xmin = max([global_xmin, loc_extrema['last'][1], loc_extrema['first'][1]])
test3 = npy.linalg.norm(xmin-xmax) > self.pars.height_tol
# generate a suitable threshold from local maximum
try:
thresh_pc = self.pars.thresh_pc
except:
# default value of 15%
thresh_pc = 0.15
thresh = (xmin + thresh_pc*(xmax-xmin))
if self.pars.verbose_level > 0:
print("xmin used =", xmin)
print("thresh = ", thresh)
# Define extent of spike for purposes of quadratic fit ...
evs_found = self.pars.ev.searchForEvents(trange=[self.pars.tlo,
self.pars.thi],
parDict={'thresh': thresh})
tlo = evs_found[0][0]
thi = evs_found[1][0]
tmax = pts.indepvararray[max_val_ix]
symm_dist = npy.min([abs(tmax-tlo), abs(thi-tmax)])
# HACK! Ensure dt value will not cause us to hit an index directly, otherwise
# have to catch case from Pointset.find method when return value is a single
# integer index rather than a pair of indices
if symm_dist > self.pars.fit_width_max/2.000000007:
dt = self.pars.fit_width_max/2.000000007
else:
dt = symm_dist*1.0000000007
tlo = tmax-dt
thi = tmax+dt
ixlo = pts.find(tmax-dt, end=0)
ixhi = pts.find(tmax+dt, end=1)
if self.pars.verbose_level > 0:
print("ixlo =", ixlo, "ixhi =", ixhi)
print("tlo =",tmax-dt, "thi =",tmax+dt)
print(pts[ixlo], pts[ixhi])
print("\nget_spike tests:", test1, test2, test3)
self.results.ixlo = ixlo
self.results.ixhi = ixhi
self.results.ixmax = max_val_ix
self.results.tlo = tlo
self.results.thi = thi
self.results.tmax = tmax
self.results.spike_pts = pts[ixlo:ixhi]
return test1 and test2 and test3
def finish(self, traj):
# function of traj, not target
if self.pars.verbose_level > 0:
print("Finishing spike processing...")
pts = self.results.spike_pts
coord = self.pars.coord
xlo = pts[0][0]
# xmax is just an estimate of the max value
xmax = pts[self.results.ixmax-self.results.ixlo][0]
estimate_quad_coeff = -(xmax-xlo)/((self.results.tmax - \
self.results.tlo)**2)
estimate_intercept = xlo - \
((xmax-xlo)/(self.results.tmax-self.results.tlo))*self.results.tlo
res = self.quadratic.fit(pts.indepvararray, pts[coord],
pars_ic=(estimate_quad_coeff,0,estimate_intercept),
opts=args(peak_constraint=(self.results.ixmax - \
self.results.ixlo,xmax,
self.pars.weight*len(pts)/(self.results.tmax - \
self.results.tlo),
self.pars.weight*len(pts)/(xmax-xlo))))
tval, xval = res.results.peak
self.results.spike_time = tval
self.results.spike_val = xval
self.results.pars_fit = res.pars_fit
if self.pars.verbose_level > 0:
from PyDSTool import plot, show
# plot spike and quadratic fit
dec = 10
plot(pts.indepvararray, pts[coord], 'go-')
plot(tval, xval, 'rx')
ts = [pts.indepvararray[0]]
for i, t in enumerate(pts.indepvararray[:-1]):
ts.extend([t+j*(pts.indepvararray[i+1]-t)/dec for j in range(1,dec)])
ts.append(pts.indepvararray[-1])
plot(ts, [res.results.f(t) for t in ts], 'k:')
# temp
if self.pars.verbose_level > 1:
show()
class get_burst_duration(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
on_t = self.super_pars.ref_spike_times[0] - self.pars.t_lookback
self.pars.ref_burst_on_time = on_t
# find associated V for ref_on_thresh
pts = self.super_pars.ref_burst_coord_pts
x = pts[self.super_pars.burst_coord]
on_ix = pts.find(on_t, end=1)
ix_lo, ix_hi = nearest_2n_indices(x, on_ix, 2)
t = pts.indepvararray
on_res = smooth_pts(t[ix_lo:ix_hi+1],
x[ix_lo:ix_hi+1], self.super_pars.quadratic)
self.pars.ref_on_thresh = on_res.results.f(on_t)
#
off_t = self.super_pars.ref_spike_times[-1] + self.pars.t_lookforward
self.pars.ref_burst_off_time = off_t
off_ix = pts.find(off_t, end=0)
ix_lo, ix_hi = nearest_2n_indices(x, off_ix, 2)
off_res = smooth_pts(t[ix_lo:ix_hi+1],
x[ix_lo:ix_hi+1], self.super_pars.quadratic)
self.pars.ref_off_thresh = off_res.results.f(off_t)
self.pars.ref_burst_duration = off_t - on_t
self.pars.ref_burst_prop = (off_t - on_t)/self.super_pars.ref_period
def evaluate(self, target):
traj = target.test_traj
varname = self.super_pars.burst_coord
pts = self.super_pars.burst_coord_pts
on_t = self.super_results.spike_times[0] - self.pars.t_lookback
self.results.burst_on_time = on_t
x = pts[varname]
on_ix = pts.find(on_t, end=1)
ix_lo, ix_hi = nearest_2n_indices(x, on_ix, 2)
pp = make_poly_interpolated_curve(pts[ix_lo:ix_hi+1], varname,
target.model)
thresh = pp(on_t)
self.results.on_thresh = thresh
#
# don't find "off" based on last spike time because
# when new spikes suddenly appear this value will jump
# instead, use a threshold event search, assuming that
# only one period is "in view"
t = pts.indepvararray
x_rev = x[:ix_hi:-1]
t_rev = t[:ix_hi:-1]
off_ix = len(x) - npy.argmin(npy.asarray(x_rev < thresh, int))
ix_lo, ix_hi = nearest_2n_indices(x, off_ix, 2)
pp = make_poly_interpolated_curve(pts[ix_lo:ix_hi+1], varname,
target.model)
# bisect to find accurate crossing point
tlo = t[ix_lo]
thi = t[ix_hi]
off_t = simple_bisection(tlo, thi, pp, self.pars.t_tol)
self.results.burst_duration = off_t - on_t
self.results.burst_prop = (off_t - on_t) / self.super_results.period
return self.metric(self.results.burst_prop,
self.super_pars.ref_burst_prop) < self.pars.tol
class get_burst_active_phase(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
self.pars.ref_active_phase = self.super_pars.ref_spike_times[0] / \
self.super_pars.ref_period
def evaluate(self, target):
self.results.active_phase = self.super_results.spike_times[0] / \
self.super_results.period
return self.metric(self.results.active_phase,
self.pars.ref_active_phase) \
< self.pars.tol
class get_burst_dc_offset(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
# 20% of burst_on_V (i.e., on_thresh) - min_V above min_V
self.pars.ref_baseline_V = self.super_pars.ref_min_V + \
0.2*(self.super_pars.ref_on_thresh - \
self.super_pars.ref_min_V)
def evaluate(self, target):
baseline = self.super_results.min_V + 0.2*(self.super_results.on_thresh - \
self.super_results.min_V)
self.results.baseline_V = baseline - self.super_pars.ref_baseline_V
return self.metric(baseline, self.super_pars.ref_baseline_V) < \
self.pars.tol
class get_burst_passive_extent(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def postprocess_ref_traj(self):
self.pars.ref_passive_extent_V = self.super_pars.ref_max_V - \
self.super_pars.ref_min_V
def evaluate(self, target):
self.results.passive_extent_V = self.super_results.max_V - \
self.super_results.min_V
return self.metric(self.results.passive_extent_V,
self.super_pars.ref_passive_extent_V) < \
self.pars.tol
class burst_feature(ql_feature_node):
"""Embed the following sub-features, if desired:
get_burst_X, where X is a number of feature types defined in this module.
"""
def _local_init(self):
self.pars.quadratic = fit_quadratic(verbose=self.pars.verbose_level>0)
self.pars.filt_coeffs = butter(3, self.pars.cutoff, btype='highpass')
self.pars.filt_coeffs_LP = butter(3, self.pars.cutoff/10)
def postprocess_ref_traj(self):
# single coord used as indicator
pts = self.ref_traj.sample()
burst_pts = self.ref_traj.sample(coords=[self.pars.burst_coord],
dt=self.pars.dt)
xrs = burst_pts[self.pars.burst_coord]
trs = burst_pts.indepvararray
x = pts[self.pars.burst_coord]
b, a = self.pars.filt_coeffs_LP
xf = filtfilt(b, a, xrs)
t = pts.indepvararray
min_val_ix = npy.argmin(xf) # use LPF version to avoid noise artifacts
max_val_ix = npy.argmax(xf) # use LPF version to avoid spikes
min_ix_lo, min_ix_hi = nearest_2n_indices(xrs, min_val_ix, 30)
max_ix_lo, max_ix_hi = nearest_2n_indices(xrs, max_val_ix, 30)
min_res = smooth_pts(trs[min_ix_lo:min_ix_hi+1],
xf[min_ix_lo:min_ix_hi+1], self.pars.quadratic)
# use LPF data for max
max_res = smooth_pts(trs[max_ix_lo:max_ix_hi+1],
xf[max_ix_lo:max_ix_hi+1], self.pars.quadratic)
min_t, min_val = min_res.results.peak
max_t, max_val = max_res.results.peak
# thresh1 = float(max_val-self.pars.active_frac_height*(max_val-min_val))
# thresh2 = x[0]+3.
# # don't make threshold smaller than initial value, assuming
# # burst will be rising at initial condition
# thresh = max((thresh1,thresh2))
self.pars.ref_burst_coord_pts = pts
# self.pars.ref_on_thresh = thresh
# self.pars.ref_off_thresh = thresh
self.pars.ref_min_V = min_val
self.pars.ref_max_V = max_val
assert self.pars.on_cross_dir in (-1,1)
if self.pars.on_cross_dir == 1:
self.pars.off_cross_dir = -1
else:
self.pars.off_cross_dir = 1
self.pars.ref_burst_est = estimate_spiking(burst_pts[self.pars.burst_coord],
burst_pts.indepvararray,
self.pars.filt_coeffs)
self.pars.ref_burst_pts_resampled = burst_pts
# spike times will be overwritten by get_spikes_data instance, if present
#self.pars.ref_spike_times = self.pars.ref_burst_est.spike_ts
# to establish period, find min on other side of active phase
if min_t < self.pars.ref_burst_est.spike_ts[0]:
# look to the right
start_t = self.pars.ref_burst_est.spike_ts[-1]
start_ix = pts.find(start_t, end=1)
other_min_ix = npy.argmin(x[start_ix:])
other_min_t = t[start_ix+other_min_ix]
else:
# look to the left
start_t = self.pars.ref_burst_est.spike_ts[0]
start_ix = pts.find(start_t, end=0)
other_min_ix = npy.argmin(x[:start_ix])
other_min_t = t[other_min_ix]
self.pars.ref_period = abs(other_min_t - min_t)
def prepare(self, target):
# single coord used as indicator
pts = target.test_traj.sample()
x = pts[self.pars.burst_coord]
burst_pts = target.test_traj.sample(coords=[self.pars.burst_coord],
dt=self.pars.dt)
xrs = burst_pts[self.pars.burst_coord]
trs = burst_pts.indepvararray
if max(x)-min(x) < 5:
print("\n\n Not a bursting trajectory!!")
raise ValueError("Not a bursting trajectory")
b, a = self.pars.filt_coeffs_LP
xf = filtfilt(b, a, xrs)
t = pts.indepvararray
min_val_ix = npy.argmin(x) # precise because of Model's events
max_val_ix = npy.argmax(xf)
max_ix_lo, max_ix_hi = nearest_2n_indices(xrs, max_val_ix, 4)
max_res = smooth_pts(trs[max_ix_lo:max_ix_hi+1],
xf[max_ix_lo:max_ix_hi+1], self.pars.quadratic)
min_t = t[min_val_ix]
min_val = x[min_val_ix]
max_t, max_val = max_res.results.peak
self.results.min_V = min_val
self.results.max_V = max_val
assert self.pars.on_cross_dir in (-1,1)
if self.pars.on_cross_dir == 1:
self.pars.off_cross_dir = -1
else:
self.pars.off_cross_dir = 1
self.results.burst_est = estimate_spiking(burst_pts[self.pars.burst_coord],
burst_pts.indepvararray,
self.pars.filt_coeffs)
# record approximate spike times - may be overwritten by
# get_burst_spikes if done accurately
#self.results.spike_times = self.results.burst_est.spike_ts
if self.pars.verbose_level > 0:
print("Spikes found at (approx) t=", self.results.burst_est.spike_ts)
if self.results.burst_est.spike_ts[0] < self.pars.shrink_end_time_thresh:
# kludgy way to ensure that another burst doesn't encroach
if not hasattr(self.pars, 'shrunk'):
# do this *one time*
end_time = t[-1] - self.pars.shrink_end_time_amount
target.model.set(tdata=[0,end_time])
end_pts = pts.find(end_time, end=0)
end_burst_pts = burst_pts.find(end_time, end=0)
pts = pts[:end_pts]
burst_pts = burst_pts[:end_burst_pts]
self.pars.shrunk = True
elif hasattr(self.pars, 'shrunk'):
# in case period grows back reset end time *one time*
target.model.set(tdata=[0,t[-1]+self.pars.shrink_end_time_amount])
del self.pars.shrunk
self.pars.burst_coord_pts = pts
self.pars.burst_pts_resampled = burst_pts
# to establish period, find min on other side of active phase
if min_t < self.results.burst_est.spike_ts[0]:
# look to the right
start_t = self.results.burst_est.spike_ts[-1]
start_ix = pts.find(start_t, end=1)
other_min_ix = npy.argmin(x[start_ix:])
other_min_t = t[start_ix+other_min_ix]
other_min_val = x[start_ix+other_min_ix]
else:
# look to the left
start_t = self.results.burst_est.spike_ts[0]
start_ix = pts.find(start_t, end=0)
other_min_ix = npy.argmin(x[:start_ix])
other_min_t = t[other_min_ix]
other_min_val = x[other_min_ix]
self.results.period = abs(other_min_t - min_t)
self.results.period_val_error = other_min_val - min_val
class get_burst_spikes(ql_feature_node):
"""Requires a get_spike_data and get_spike_model instance to be
the only sub-features (supplied as a dict with keys 'is_spike_data'
and 'is_spike_model').
"""
def _local_init(self):
assert len(self.subfeatures) == 2
assert remain(self.subfeatures.keys(),
['is_spike_data', 'is_spike_model']) == []
def postprocess_ref_traj(self):
# get precise spike times and record in self.results.ref_spike_times
self.pars.ref_spike_times, self.pars.ref_spike_vals = \
self._eval(self.ref_traj, self.super_pars.ref_burst_est,
self.subfeatures['is_spike_data'])
def evaluate(self, target):
self.results.spike_times, self.results.spike_vals = \
self._eval(target.test_traj, self.super_results.burst_est,
self.subfeatures['is_spike_model'])
# satisfied if all spikes determined correctly
return len(self.results.spike_times) == \
len(self.super_results.burst_est.spike_ixs)
def _eval(self, traj, burst_est, is_spike):
# isn't the next line redundant?
is_spike.super_pars = copy.copy(self.pars)
spike_times = []
spike_vals = []
satisfied = True
for spike_num, spike_ix in enumerate(burst_est.spike_ixs):
if self.pars.verbose_level > 0:
print("\n Starting spike", spike_num+1)
is_spike.super_pars.burst_coord = self.super_pars.burst_coord
# step back 20% of estimated period
try:
is_spike.pars.width_tol = burst_est.ISIs[spike_num]*.8
except IndexError:
# one fewer ISI than spike, so just assume last one is about
# the same
is_spike.pars.width_tol = burst_est.ISIs[spike_num-1]*.8
is_spike.pars.tlo = burst_est.t[spike_ix] - \
is_spike.pars.width_tol #/ 2.
if self.pars.verbose_level > 0:
print("new tlo =", is_spike.pars.tlo)
# would prefer to work this out self-consistently...
#is_spike.pars.fit_width_max = ?
new_sat = is_spike(traj)
satisfied = satisfied and new_sat
# make recorded spike time in global time coordinates
if new_sat:
spike_times.append(is_spike.results.spike_time)
spike_vals.append(is_spike.results.spike_val)
if self.pars.verbose_level > 0:
print("Spike times:", spike_times)
return spike_times, spike_vals
class get_burst_peak_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
# should really use quadratic fit to get un-biased peaks
peak_vals = self.super_pars.ref_spike_vals
peak_t = self.super_pars.ref_spike_times
self.ref_traj = numeric_to_traj([peak_vals], 'peak_envelope',
self.super_pars.burst_coord, peak_t,
self.super_pars.ref_burst_pts_resampled.indepvarname,
discrete=False)
# discrete option false yields error if only one spike found, but error is cryptic!
if len(peak_t) > 1:
ref_env_ts = npy.linspace(peak_t[0], peak_t[-1],
self.pars.num_samples)
else:
ref_env_ts = npy.array(peak_t)
self.pars.ref_peak_vals = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)[0]
def evaluate(self, target):
# ignore target
dc_offset = self.super_results.baseline_V
# min and max events in model mean that these are recorded
# accurately in the pointsets already
peak_vals = self.super_results.spike_vals - dc_offset
peak_t = self.super_results.spike_times
self.results.burst_peak_env = numeric_to_traj([peak_vals],
'peak_envelope',
self.super_pars.burst_coord, peak_t,
self.super_pars.burst_pts_resampled.indepvarname,
discrete=False)
# burst_est = self.super_results.burst_est
# call_args = {}
# try:
# call_args['noise_floor'] = is_spike.pars.noise_tol
# except AttributeError:
# pass
# try:
# call_args['depvar'] = self.super_pars.burst_coord
# except AttributeError:
# pass
# try:
# call_args['tol'] = 1.1*burst_est.std_ISI/burst_est.mean_ISI
# except AttributeError:
# pass
# call_args['make_traj'] = False
# call_args['spest'] = burst_est
# env = spike_envelope(burst_est.pts, burst_est.mean_ISI,
# **call_args)
test_env_ts = npy.linspace(peak_t[0], peak_t[-1], self.pars.num_samples)
return self.metric(self.results.burst_peak_env(test_env_ts,
self.super_pars.burst_coord),
self.super_pars.ref_peak_vals) < self.pars.tol
class get_burst_trough_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
burst_pts = self.super_pars.ref_burst_pts_resampled
burst_est = self.super_pars.ref_burst_est
vals = burst_pts[self.super_pars.burst_coord]
inter_spike_ixs = [(burst_est.spike_ixs[i-1],
burst_est.spike_ixs[i]) \
for i in range(1, len(burst_est.spike_ixs))]
# should really use quadratic fit to get an un-biased minimum
trough_ixs = [npy.argmin(vals[ix_lo:ix_hi])+ix_lo for ix_lo, ix_hi in \
inter_spike_ixs]
trough_vals = [vals[i] for i in trough_ixs]
trough_t = [burst_pts.indepvararray[i] for i in trough_ixs]
self.ref_traj = numeric_to_traj([trough_vals], 'trough_envelope',
self.super_pars.burst_coord, trough_t,
burst_pts.indepvarname, discrete=False)
ref_env_ts = npy.linspace(trough_t[0], trough_t[-1],
self.pars.num_samples)
self.pars.ref_trough_vals = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)
def evaluate(self, target):
# ignore target
dc_offset = self.super_results.baseline_V
burst_pts = self.super_pars.burst_coord_pts
burst_est = self.super_results.burst_est
vals = burst_pts[self.super_pars.burst_coord]
ts = self.super_results.spike_times
spike_ixs = []
for t in ts:
tix = burst_pts.find(t, end=0)
spike_ixs.append(tix)
inter_spike_ixs = [(spike_ixs[i-1],
spike_ixs[i]) \
for i in range(1, len(ts))]
# min and max events in model mean that these are recorded
# accurately in the pointsets already
trough_ixs = [npy.argmin(vals[ix_lo:ix_hi])+ix_lo for ix_lo, ix_hi in \
inter_spike_ixs]
trough_vals = [vals[i] - dc_offset for i in trough_ixs]
# use self.pars.trough_t for isi mid-point times
trough_t = [burst_pts.indepvararray[i] for i in trough_ixs]
self.results.burst_trough_env = numeric_to_traj([trough_vals],
'trough_envelope',
self.super_pars.burst_coord,
trough_t,
burst_pts.indepvarname, discrete=False)
test_env_ts = npy.linspace(trough_t[0], trough_t[-1],
self.pars.num_samples)
self.results.trough_t = trough_t
return self.metric(self.results.burst_trough_env(test_env_ts,
self.super_pars.burst_coord),
self.super_pars.ref_trough_vals) < self.pars.tol
class get_burst_isi_env(qt_feature_leaf):
"""Requires tol and num_samples parameters.
"""
def _local_init(self):
self.metric = metric_L2()
self.metric_len = self.pars.num_samples
def postprocess_ref_traj(self):
burst_pts = self.super_pars.ref_burst_pts_resampled
ts = burst_pts.indepvararray
burst_est = self.super_pars.ref_burst_est
# find approximate (integer) mid-point index between spikes
mid_isi_ixs = [int(0.5*(burst_est.spike_ixs[i-1]+burst_est.spike_ixs[i])) \
for i in range(1, len(burst_est.spike_ixs))]
isi_t = [ts[i] for i in mid_isi_ixs]
isi_vals = [ts[burst_est.spike_ixs[i]]-ts[burst_est.spike_ixs[i-1]] for \
i in range(1, len(burst_est.spike_ixs))]
self.ref_traj = numeric_to_traj([isi_vals], 'isi_envelope',
self.super_pars.burst_coord, isi_t,
burst_pts.indepvarname, discrete=False)
ref_env_ts = npy.linspace(isi_t[0], isi_t[-1],
self.pars.num_samples)
self.pars.ref_isis = self.ref_traj(ref_env_ts,
self.super_pars.burst_coord)
def evaluate(self, target):
# ignore target
ts = self.super_results.spike_times
tname = self.super_pars.burst_coord_pts.indepvarname
isi_vals = [ts[i]-ts[i-1] for i in range(1, len(ts))]
self.results.burst_isi_env = numeric_to_traj([isi_vals],
'isi_envelope',
self.super_pars.burst_coord,
self.super_results.trough_t,
tname, discrete=False)
test_env_ts = npy.linspace(self.super_results.trough_t[0],
self.super_results.trough_t[-1],
self.pars.num_samples)
return self.metric(self.results.burst_isi_env(test_env_ts,
self.super_pars.burst_coord),
self.pars.ref_isis) < self.pars.tol
class get_burst_upsweep(qt_feature_leaf):
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.t_offs)
def postprocess_ref_traj(self):
vname = self.super_pars.burst_coord
ts = [self.super_pars.ref_spike_times[0] - toff for \
toff in self.pars.t_offs]
self.pars.ref_upsweep_V = npy.array([self.ref_traj(t, vname) for \
t in ts])
def evaluate(self, target):
dc_offset = self.super_results.baseline_V
vname = self.super_pars.burst_coord
all_pts = self.super_pars.burst_coord_pts
vals = []
for toff in self.pars.t_offs:
target_t = self.super_results.spike_times[0] - toff
if target_t < all_pts.indepvararray[0]:
# out of range - return penalty
self.metric.results = 5000*npy.ones((self.metric_len,),float)
return False
tix = all_pts.find(target_t, end=0)
new_var = make_poly_interpolated_curve(all_pts[tix-5:tix+5],
vname, target.model)
vals.append(new_var(target_t))
self.results.upsweep_V = npy.array(vals) - dc_offset
return self.metric(self.results.upsweep_V, \
self.pars.ref_upsweep_V) < self.pars.tol
class get_burst_downsweep(qt_feature_leaf):
def _local_init(self):
self.metric = metric_L2()
self.metric_len = len(self.pars.t_offs)
def postprocess_ref_traj(self):
vname = self.super_pars.burst_coord
ts = [self.super_pars.ref_spike_times[-1] + toff for \
toff in self.pars.t_offs]
self.pars.ref_downsweep_V = npy.array([self.ref_traj(t, vname) for \
t in ts])
def evaluate(self, target):
dc_offset = self.super_results.baseline_V
vname = self.super_pars.burst_coord
all_pts = self.super_pars.burst_coord_pts
vals = []
for toff in self.pars.t_offs:
target_t = self.super_results.spike_times[-1] + toff
if target_t > all_pts.indepvararray[-1]:
# out of range - return penalty
self.metric.results = 5000*npy.ones((self.metric_len,),float)
return False
tix = all_pts.find(target_t, end=0)
new_var = make_poly_interpolated_curve(all_pts[tix-5:tix+5],
vname, target.model)
vals.append(new_var(target_t))
self.results.downsweep_V = npy.array(vals) - dc_offset
return self.metric(self.results.downsweep_V,
self.pars.ref_downsweep_V) < self.pars.tol
class get_burst_num_spikes(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float()
self.metric_len = 1
def evaluate(self, target):
return self.metric(npy.array(len(self.super_results.spike_times)),
npy.array(len(self.super_pars.ref_spike_times))) == 0
class get_burst_period_info(qt_feature_leaf):
def _local_init(self):
self.metric = metric_weighted_L2()
self.metric_len = 2
# strongly penalize lack of periodicity
self.metric.weights = npy.array([1., 1000.])
def evaluate(self, target):
return self.metric(npy.array([self.super_results.period,
self.super_results.period_val_error]),
npy.array([self.super_pars.ref_period,
0.])) \
< self.pars.tol
# --------------------------------------------
class spike_metric(metric):
"""Measures the distance between spike time and height,
using an inherent weighting of height suited to neural voltage
signals (0.05 of time distance)."""
def __call__(self, sp1, sp2):
# weight 'v' component down b/c 't' values are on a different scale
self.results = npy.array(sp1-sp2).flatten()*npy.array([1,0.05])
        return npy.linalg.norm(self.results)
# -*- coding: utf-8 -*-
import numpy as np
#import matplotlib.pyplot as plt
class CF(object):
def __init__(self, n_user, n_item, n_factor, lambd):
self.n_user = n_user
self.n_item = n_item
self.n_factor = n_factor
self.lambd = lambd
        # initialize the user preference matrix and the item (movie) feature matrix
self.U = np.random.normal(0, 0.01, (n_user, n_factor))
self.I = np.random.normal(0, 0.01, (n_factor, n_item))
self.trainloss = []
self.testloss = []
self.snapshot = []
    def predict(self):
        # Task 1a: compute the predicted rating matrix from self.U and self.I
        # (completed here as the standard low-rank reconstruction U @ I)
        return np.dot(self.U, self.I)
def mse(self, Y, W):
# Y is rating matrix
# W is weight(or mask) matrix
        # mean squared error between predicted and observed ratings on the rated entries
return np.sum(((self.predict() - Y) * W)**2) / W.sum()
def update(self, Y, W):
# Alternating Least Square
for u, Wu in enumerate(W):
            # update each row of self.U, i.e. each user's latent features
self.U[u] = np.linalg.solve(np.dot(self.I, np.dot(np.diag(Wu), self.I.T)) + self.lambd * np.eye(self.n_factor),\
                                        np.dot(self.I, np.dot(np.diag(Wu), Y[u])))
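        # assumed completion (the source file is truncated here): the symmetric
        # item-factor ALS update, solving each column of self.I with self.U held fixed
        for i, Wi in enumerate(W.T):
            self.I[:, i] = np.linalg.solve(np.dot(self.U.T, np.dot(np.diag(Wi), self.U)) + self.lambd * np.eye(self.n_factor),
                                           np.dot(self.U.T, np.dot(np.diag(Wi), Y[:, i])))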
"""Display image and 1d plot."""
import napari
import numpy as np
from skimage import data
import napari_plot
from napari_plot._qt.qt_viewer import QtViewer
# create the viewer with an image
viewer = napari.view_image(data.astronaut(), rgb=True)
viewer1d = napari_plot.ViewerModel1D()
widget = QtViewer(viewer1d, parent=viewer.window.qt_viewer.parent())
# viewer1d.add_line(np.c_[np.arange(100), np.arange(100) + 300], name="line")
viewer1d.add_centroids(
np.c_[np.arange(20), np.random.randint(0, 100, 20), np.random.randint(0, 100, 20)],
name="centroids (y)",
orientation="horizontal",
)
viewer1d.add_centroids(
np.c_[np.arange(20), np.random.randint(0, 100, 20), np.random.randint(0, 100, 20)],
name="centroids (x)",
orientation="vertical",
)
# completed from context (the source is truncated here); the third column and
# the name are assumptions following the pattern of the calls above
viewer1d.add_centroids(
    np.c_[np.zeros(100), np.arange(100), np.random.randint(0, 100, 100)],
    name="centroids",
)
'''*-----------------------------------------------------------------------*---
Author: <NAME>
Date : Feb 15 2020
SARC
File Name : env.py
Description: Environment module for simulation
---*-----------------------------------------------------------------------*'''
import math
import numpy as np
import random
from scipy.spatial import Voronoi, voronoi_plot_2d, KDTree
import sys
import time
import drone
import utils
import vis
#TODO keep track of where broadcasts are occurring
#TODO radio propagation model
#TODO point of interest model
BANDWIDTH = 1.0
class env():
def __init__(self, n_drones, p_bounds, M, F, v_max):
self.p_bounds = p_bounds
self.n_drones = n_drones
self.M = M
self.F = F
self.v_max = v_max
self.drn = []
#self.poi = []
#self.poi_active = []
self.tx = {}
self.bs = []
self.t = 0
def setup(self):
#self.bs = [bs.base_station([0,0])] #set up for multiple base stations in future work
#generate or load in situation, including drone positions, pois, freqs
'''
for i in range(self.n_drones):
x = random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])
y = random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])
self.drn.append(drone.drone(i, [x, y], 1))
#self.g.add_node(len(self.bs) + i, p=self.drn[i].pos)
'''
#for i in range(N_POI):
# self.poi.append(poi.poi([random.uniform(self.p_bounds[0][0], self.p_bounds[0][1]), random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])], random.randint(0, 500), 500))
#sort pois by start time
#self.poi.sort(key=lambda x: x.t_start)
#random.seed(1)
self.gt = np.zeros((self.n_drones, 2))
'''
self.gt[0][0] = -200
self.gt[0][1] = -200
self.gt[1][0] = 100
self.gt[1][1] = -100
self.gt[2][0] = -100
self.gt[2][1] = 100
self.gt[3][0] = 100
self.gt[3][1] = 100
'''
'''
for i in range(self.n_drones):
#self.gt[i][0] = random.uniform(self.p_bounds[0][0], self.p_bounds[0][1])
#self.gt[i][1] = random.uniform(self.p_bounds[1][0], self.p_bounds[1][1])
self.gt[i][0] = np.clip(random.gauss(0, 150), self.p_bounds[0][0], self.p_bounds[0][1])
self.gt[i][1] = np.clip(random.gauss(0, 150), self.p_bounds[1][0], self.p_bounds[1][1])
'''
#line
self.gt[0][0] = 400
self.gt[0][1] = -400
self.gt[1][0] = 400
self.gt[1][1] = -300
self.gt[2][0] = 400
self.gt[2][1] = -200
self.gt[3][0] = 400
self.gt[3][1] = 400
#square
self.gt[4][0] = -400
self.gt[4][1] = 400
self.gt[5][0] = -400
self.gt[5][1] = 300
self.gt[6][0] = -300
self.gt[6][1] = 300
self.gt[7][0] = -300
self.gt[7][1] = 400
#for k in range(self.n_drones):
# print("\\addplot[color=green,mark=square] coordinates{(%.2f,%.2f)};" % (self.gt[k][0], self.gt[k][1]))
#'''
#drone trajectory init
    self.init_q = np.zeros((self.n_drones, self.M, 2))
#from __future__ import print_function
import cv2
import numpy as np
import os
import glob
import math
import time
#from dronekit import connect
def arm_and_takeoff(vehicle):
print("Basic pre-arm checks")
# Don't try to arm until autopilot is ready
#while not vehicle.is_armable:
#print(" Waiting for vehicle to initialise...")
#time.sleep(1)
print("Arming motors")
# Copter should arm in GUIDED mode
#vehicle.mode = VehicleMode("STABILIZE")
vehicle.armed = True
'''
vehicle.channels.overrides = {'1':Roll} #Roll
vehicle.channels.overrides = {'2':Pitch} #Pitch
vehicle.channels.overrides = {'3':Throttle} #Throttle
vehicle.channels.overrides = {'4':1500} #Yaw
'''
#out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (1280,720))
cap = cv2.VideoCapture(0)
dst = None
def find(DIM, K, D, img_files, img_order = 0):
'''
print('Connecting to vehicle on: /dev/ttyAMA0')
vehicle = connect('/dev/ttyAMA0', wait_ready=True, baud=921600)
arm_and_takeoff(vehicle)
'''
differences = []
img = cv2.imread(img_files[img_order],-1)
akaze = cv2.AKAZE_create()
kp_image, desc_image = akaze.detectAndCompute(img,None)
matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
'''
index_params = dict(algorithm=0,trees = 5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
'''
try:
while True:
start = time.time()
_,frame = cap.read()
h,w = frame.shape[:2]
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
undistorted_img = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
grayframe = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2GRAY)
kp_grayframe, desc_grayframe = akaze.detectAndCompute(grayframe,None)
try:
matches = matcher.knnMatch(desc_image, desc_grayframe, 2)
except Exception as e:
print(e)
matches = []
good_points=[]
for m,n in matches:
if m.distance < 0.8*n.distance:
good_points.append(m)
#homography
if len(good_points) > 10:
query_points = np.float32([kp_image[m.queryIdx].pt for m in good_points]).reshape(-1,1,2)
train_pts = np.float32([kp_grayframe[m.trainIdx].pt for m in good_points]).reshape(-1,1,2)
matrix = np.array([])
matrix, mask = cv2.findHomography(query_points, train_pts, cv2.RANSAC, 5.0)
matches_mask = mask.ravel().tolist()
h, w = img.shape[:-1]
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
try:
                    if matrix is None:
                        matrix = np.array([])
if not matrix.size == 0:
dst = cv2.perspectiveTransform(pts, matrix)
dst_1 = tuple(np.int32(dst[0]))
a = dst_1[0]
pt_1 = (a[0],a[1])
                        dst_2 = tuple(np.int32(dst[1]))
import pytest
import numpy as np
@pytest.fixture()
def diverse_repo(repo):
co = repo.checkout(write=True)
co.add_ndarray_column('test', prototype=np.arange(10))
co.add_str_column('test_meta')
co.add_bytes_column('test_bytes')
co.columns['test'][0] = np.arange(10)
co.columns['test'][1] = np.arange(10) + 1
co.columns['test'][2] = np.arange(10) + 2
co.columns['test'][3] = np.arange(10) + 3
co.columns['test'][4] = np.arange(10) + 4
co['test_meta']['hi'] = 'foo'
co['test_meta']['aea'] = 'eeae'
co['test_bytes']['lol'] = b'foo bytes'
co.commit('hello world')
sample_trimg = np.arange(50).reshape(5, 10).astype(np.uint8)
sample_trlabel = np.array([0], dtype=np.int64)
sample_vimg = np.zeros(50).reshape(5, 10).astype(np.uint16)
    sample_vlabel = np.array([1], dtype=np.int32)
"""
Backend based on Numpy.
This code is used to test the theano backend and is a possible option in
the preprocessing module. It is too slow to actually train neural networks.
"""
#%%
import numpy as np
from scipy.special import expit
from scipy.special import logsumexp as scipy_logsumexp  # scipy.misc.logsumexp was removed in SciPy 1.0
from natural_bm.backend.common import epsilon, set_epsilon, floatx, set_floatx, cast_to_floatx, intx, set_intx
#%% Variables
def variable(value, dtype=None, name=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
# Returns
A variable instance .
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'eval'):
value = value.eval()
return np.asarray(value, dtype=dtype)
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiate an input data placeholder variable. """
raise NotImplementedError('This function is not implemented for the numpy backend.')
def shape(x):
"""Returns the shape of a tensor. """
return x.shape
def ndim(x):
"""Returns the dimension of a tensor. """
return x.ndim
def dtype(x):
"""Returns the dtype of a tensor as a string. """
return x.dtype.name
def eval(x):
"""Returns the value of a tensor. """
return x
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable. """
if dtype is None:
dtype = floatx()
return variable(np.zeros(shape), dtype, name)
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable. """
if dtype is None:
dtype = floatx()
return variable(np.ones(shape), dtype, name)
def eye(size, dtype=None, name=None):
"""Instantiates an identity matrix. """
if dtype is None:
dtype = floatx()
return variable(np.eye(size), dtype, name)
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable with the same shape as x. """
return np.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable with the same shape as x. """
return np.zeros_like(x, dtype=dtype)
def cast(x, dtype):
"""Casts x to dtype. """
if isinstance(x, np.ndarray):
x = x.astype(dtype)
else:
x = np.asarray(x, dtype).item()
return x
#%% LINEAR ALGEBRA
"""
Assumed overridden:
+, -, /, *, +=, -=, *=, /=
"""
def dot(x, y):
"""Dot product of x and y """
return np.dot(x, y)
def transpose(x):
"""Tensor transpose """
return np.transpose(x)
def svd(x):
"""Singular value decomposition (SVD) of x. Returns U, S, V. """
return np.linalg.svd(x)
def diag(x):
"""Extracts diagonal of a tensor. """
return np.diag(x)
def fill_diagonal(x, val):
"""Fills in the diagonal of a tensor. """
n = x.shape[0]
x[range(n), range(n)] = val
return x
def solve(a, b):
"""Solves the equation ax=b for x. """
return np.linalg.solve(a, b)
#%% ELEMENT-WISE OPERATIONS
def _keras_axis(x, axis):
"""This is what keras expects axis to do for things like mean, std, etc
"""
if isinstance(axis, list):
assert len(axis) == 2 and axis[1] == -1, 'Trying to match behavior from keras backend tests'
x = np.reshape(x, (x.shape[0], -1))
axis = axis[0]
return x, axis
def max(x, axis=None, keepdims=False):
"""Max of the values in a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.max(x, axis=axis, keepdims=keepdims)
def min(x, axis=None, keepdims=False):
"""Min of the values in a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.min(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.sum(x, axis=axis, keepdims=keepdims)
def prod(x, axis=None, keepdims=False):
"""Multiply the values in a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.prod(x, axis=axis, keepdims=keepdims)
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
# Returns
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return np.cumsum(x, axis=axis)
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the product.
# Returns
A tensor of the cumulative product of values of `x` along `axis`.
"""
return np.cumprod(x, axis=axis)
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.mean(x, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
"""Standard deviation of the values in a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.std(x, axis=axis, keepdims=keepdims)
def var(x, axis=None, keepdims=False):
"""Variance of the values in a tensor, alongside the specified axis. """
x, axis = _keras_axis(x, axis)
return np.var(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR). """
x, axis = _keras_axis(x, axis)
    return np.any(x, axis=axis, keepdims=keepdims)
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND). """
x, axis = _keras_axis(x, axis)
    return np.all(x, axis=axis, keepdims=keepdims)
def argmax(x, axis=-1):
"""Index of the maximum of the values in a tensor, alongside the specified axis. """
return np.argmax(x, axis=axis)
def argmin(x, axis=-1):
"""Index of the maximum of the values in a tensor, alongside the specified axis. """
return np.argmin(x, axis=axis)
def square(x):
"""Elementwise square of a tensor. """
return np.square(x)
def abs(x):
"""Absolute value of a tensor. """
return np.abs(x)
def sqrt(x):
"""Square root of a tensor after clipping to positive definite. """
x = np.clip(x, 0., np.inf)
return np.sqrt(x)
def exp(x):
"""Exponential of a tensor. """
return np.exp(x)
def log(x):
"""Natural logarithm of a tensor. """
return np.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
# Arguments
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
# Returns
The reduced tensor.
"""
return scipy_logsumexp(x, axis=axis, keepdims=keepdims)
def logdiffexp(x, axis=None, keepdims=False):
"""Computes the log(diff(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(diff(exp(x))).
"""
assert x.shape[0] == 2
a = np.max(x)
logdiff = a + np.log(np.diff(np.exp(x-a)))
logdiff = logdiff.item()
return logdiff
def round(x):
"""Round tensor to nearest integer. Rounds half to even. """
return np.round(x)
def sign(x):
"""Sign of tensor. """
return np.sign(x)
def pow(x, a):
"""Elementwise power of a tensor. """
return np.power(x, a)
def clip(x, min_value, max_value):
"""Clips tensor x to be between min_value and max_value """
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
return np.clip(x, min_value, max_value)
def equal(x, y):
"""Elementwise x == y """
return np.equal(x, y)
def not_equal(x, y):
"""Elementwise x != y """
return np.not_equal(x, y)
def greater(x, y):
"""Elementwise x > y """
return np.greater(x, y)
def greater_equal(x, y):
"""Elementwise x >= y """
return np.greater_equal(x, y)
def less(x, y):
"""Elementwise x < y """
return np.less(x, y)
def less_equal(x, y):
"""Elementwise x <= y """
return np.less_equal(x, y)
def maximum(x, y):
"""Elementwise maximum """
return np.maximum(x, y)
def minimum(x, y):
"""Elementwise minimum """
return np.minimum(x, y)
def sin(x):
"""Elementwise sine """
return np.sin(x)
def cos(x):
"""Elementwise cosine """
return np.cos(x)
#%% SHAPE OPERATIONS
def concatenate(tensors, axis=-1):
"""Concatenates list of tensors along given axis """
return np.concatenate([x for x in tensors], axis=axis)
def reshape(x, shape):
"""Reshapes tensor x to given shape """
    return np.reshape(x, shape)
import numpy as np
from openmdao.api import Group, IndepVarComp, ParallelGroup, ScipyGMRES, NLGaussSeidel
from openmdao.core.mpi_wrap import MPI
if MPI:
from openmdao.api import PetscKSP
from wakeexchange.floris import floris_wrapper, add_floris_params_IndepVarComps
from wakeexchange.gauss import add_gauss_params_IndepVarComps
from GeneralWindFarmComponents import WindFrame, AdjustCtCpYaw, MUX, WindFarmAEP, DeMUX, \
CPCT_Interpolate_Gradients_Smooth, WindDirectionPower, add_gen_params_IdepVarComps, \
CPCT_Interpolate_Gradients
class RotorSolveGroup(Group):
def __init__(self, nTurbines, direction_id=0, datasize=0, differentiable=True,
use_rotor_components=False, nSamples=0, wake_model=floris_wrapper,
wake_model_options=None):
super(RotorSolveGroup, self).__init__()
if wake_model_options is None:
wake_model_options = {'differentiable': differentiable, 'use_rotor_components': use_rotor_components,
'nSamples': nSamples}
from openmdao.core.mpi_wrap import MPI
# set up iterative solvers
epsilon = 1E-6
if MPI:
self.ln_solver = PetscKSP()
else:
self.ln_solver = ScipyGMRES()
self.nl_solver = NLGaussSeidel()
self.ln_solver.options['atol'] = epsilon
self.add('CtCp', CPCT_Interpolate_Gradients_Smooth(nTurbines, direction_id=direction_id, datasize=datasize),
promotes=['gen_params:*', 'yaw%i' % direction_id,
'wtVelocity%i' % direction_id, 'Cp_out'])
# TODO refactor the model component instance
self.add('floris', wake_model(nTurbines, direction_id=direction_id, wake_model_options=wake_model_options),
promotes=(['model_params:*', 'wind_speed', 'axialInduction',
'turbineXw', 'turbineYw', 'rotorDiameter', 'yaw%i' % direction_id, 'hubHeight',
'wtVelocity%i' % direction_id]
if (nSamples == 0) else
['model_params:*', 'wind_speed', 'axialInduction',
'turbineXw', 'turbineYw', 'rotorDiameter', 'yaw%i' % direction_id, 'hubHeight',
'wtVelocity%i' % direction_id, 'wsPositionX', 'wsPositionY', 'wsPositionZ',
'wsArray%i' % direction_id]))
self.connect('CtCp.Ct_out', 'floris.Ct')
class DirectionGroup(Group):
"""
Group containing all necessary components for wind plant calculations
in a single direction
"""
def __init__(self, nTurbines, direction_id=0, use_rotor_components=False, datasize=0,
differentiable=True, add_IdepVarComps=True, params_IdepVar_func=add_floris_params_IndepVarComps,
params_IndepVar_args=None, nSamples=0, wake_model=floris_wrapper, wake_model_options=None, cp_points=1,
cp_curve_spline=None):
super(DirectionGroup, self).__init__()
if add_IdepVarComps:
if params_IdepVar_func is not None:
if (params_IndepVar_args is None) and (wake_model is floris_wrapper):
params_IndepVar_args = {'use_rotor_components': False}
elif params_IndepVar_args is None:
params_IndepVar_args = {}
params_IdepVar_func(self, **params_IndepVar_args)
add_gen_params_IdepVarComps(self, datasize=datasize)
self.add('directionConversion', WindFrame(nTurbines, differentiable=differentiable, nSamples=nSamples),
promotes=['*'])
if use_rotor_components:
self.add('rotorGroup', RotorSolveGroup(nTurbines, direction_id=direction_id,
datasize=datasize, differentiable=differentiable,
nSamples=nSamples, use_rotor_components=use_rotor_components,
wake_model=wake_model, wake_model_options=wake_model_options),
promotes=(['gen_params:*', 'yaw%i' % direction_id, 'wtVelocity%i' % direction_id,
'model_params:*', 'wind_speed', 'axialInduction',
'turbineXw', 'turbineYw', 'rotorDiameter', 'hubHeight']
if (nSamples == 0) else
['gen_params:*', 'yaw%i' % direction_id, 'wtVelocity%i' % direction_id,
'model_params:*', 'wind_speed', 'axialInduction',
'turbineXw', 'turbineYw', 'rotorDiameter', 'hubHeight', 'wsPositionX', 'wsPositionY', 'wsPositionZ',
'wsArray%i' % direction_id]))
else:
self.add('CtCp', AdjustCtCpYaw(nTurbines, direction_id, differentiable),
promotes=['Ct_in', 'Cp_in', 'gen_params:*', 'yaw%i' % direction_id])
self.add('myModel', wake_model(nTurbines, direction_id=direction_id, wake_model_options=wake_model_options),
promotes=(['model_params:*', 'wind_speed', 'axialInduction',
'turbineXw', 'turbineYw', 'rotorDiameter', 'yaw%i' % direction_id, 'hubHeight',
'wtVelocity%i' % direction_id]
if (nSamples == 0) else
['model_params:*', 'wind_speed', 'axialInduction',
'turbineXw', 'turbineYw', 'rotorDiameter', 'yaw%i' % direction_id, 'hubHeight',
'wtVelocity%i' % direction_id, 'wsPositionXw', 'wsPositionYw', 'wsPositionZ',
'wsArray%i' % direction_id]))
self.add('powerComp', WindDirectionPower(nTurbines=nTurbines, direction_id=direction_id, differentiable=True,
use_rotor_components=use_rotor_components, cp_points=cp_points,
cp_curve_spline=cp_curve_spline),
promotes=['air_density', 'generatorEfficiency', 'rotorDiameter',
'wtVelocity%i' % direction_id, 'rated_power',
'wtPower%i' % direction_id, 'dir_power%i' % direction_id, 'cut_in_speed', 'cp_curve_cp',
'cp_curve_vel'])
if use_rotor_components:
self.connect('rotorGroup.Cp_out', 'powerComp.Cp')
else:
self.connect('CtCp.Ct_out', 'myModel.Ct')
self.connect('CtCp.Cp_out', 'powerComp.Cp')
class AEPGroup(Group):
"""
Group containing all necessary components for wind plant AEP calculations using the FLORIS model
"""
def __init__(self, nTurbines, nDirections=1, use_rotor_components=False, datasize=0,
differentiable=True, optimizingLayout=False, nSamples=0, wake_model=floris_wrapper,
wake_model_options=None, params_IdepVar_func=add_floris_params_IndepVarComps,
params_IndepVar_args=None, cp_points=1, cp_curve_spline=None, rec_func_calls=False):
super(AEPGroup, self).__init__()
if wake_model_options is None:
wake_model_options = {'differentiable': differentiable, 'use_rotor_components': use_rotor_components,
'nSamples': nSamples, 'verbose': False}
# providing default unit types for general MUX/DeMUX components
power_units = 'kW'
direction_units = 'deg'
wind_speed_units = 'm/s'
# add necessary inputs for group
self.add('dv0', IndepVarComp('windDirections', np.zeros(nDirections), units=direction_units), promotes=['*'])
self.add('dv1', IndepVarComp('windSpeeds', np.zeros(nDirections), units=wind_speed_units), promotes=['*'])
self.add('dv2', IndepVarComp('windFrequencies', np.ones(nDirections)), promotes=['*'])
self.add('dv3', IndepVarComp('turbineX', np.zeros(nTurbines), units='m'), promotes=['*'])
self.add('dv4', IndepVarComp('turbineY', np.zeros(nTurbines), units='m'), promotes=['*'])
self.add('dv4p5', IndepVarComp('hubHeight', np.zeros(nTurbines), units='m'), promotes=['*'])
# add vars to be seen by MPI and gradient calculations
self.add('dv5', IndepVarComp('rotorDiameter', np.zeros(nTurbines), units='m'), promotes=['*'])
self.add('dv6', IndepVarComp('axialInduction', np.zeros(nTurbines)), promotes=['*'])
self.add('dv7', IndepVarComp('generatorEfficiency', np.zeros(nTurbines)), promotes=['*'])
self.add('dv8', IndepVarComp('air_density', val=1.1716, units='kg/(m*m*m)'), promotes=['*'])
self.add('dv9', IndepVarComp('rated_power', np.ones(nTurbines)*5000., units='kW',
desc='rated power for each turbine', pass_by_obj=True), promotes=['*'])
if not use_rotor_components:
self.add('dv10', IndepVarComp('Ct_in', np.zeros(nTurbines)), promotes=['*'])
self.add('dv11', IndepVarComp('Cp_in', np.zeros(nTurbines)), promotes=['*'])
self.add('dv12', IndepVarComp('cp_curve_cp', np.zeros(datasize),
desc='cp curve cp data', pass_by_obj=True), promotes=['*'])
self.add('dv13', IndepVarComp('cp_curve_vel', np.zeros(datasize), units='m/s',
desc='cp curve velocity data', pass_by_obj=True), promotes=['*'])
        # completed following the pattern of the neighboring IndepVarComp lines (the source is truncated here)
        self.add('dv14', IndepVarComp('cut_in_speed', np.zeros(nTurbines), units='m/s'), promotes=['*'])
import os
import tempfile
import pytest
import numpy as np
from pycorr import TwoPointCounter, AnalyticTwoPointCounter, utils, setup_logging
from pycorr.twopoint_counter import TwoPointCounterError
def diff(position2, position1):
return [p2 - p1 for p1, p2 in zip(position1, position2)]
def midpoint(position1, position2):
    # note: returns the coordinate-wise sum, i.e. twice the midpoint
    return [p2 + p1 for p1, p2 in zip(position1, position2)]
def norm(position):
return (sum(p**2 for p in position))**0.5
def dotproduct(position1, position2):
return sum(x1 * x2 for x1, x2 in zip(position1, position2))
def dotproduct_normalized(position1, position2):
return dotproduct(position1, position2) / (norm(position1) * norm(position2))
def get_weight(xyz1, xyz2, weights1, weights2, n_bitwise_weights=0, twopoint_weights=None, nrealizations=None, noffset=1, default_value=0.):
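    # inverse-probability (PIP-style) weight: nrealizations / (noffset + popcount
    # of shared bitwise-weight bits), times the product of any float weights,
    # times an optional angular (two-point) weight interpolated further below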
if nrealizations is None:
weight = 1
else:
denom = noffset + sum(bin(w1 & w2).count('1') for w1, w2 in zip(weights1[:n_bitwise_weights], weights2[:n_bitwise_weights]))
weight = default_value if denom == 0 else nrealizations / denom
for w1, w2 in zip(weights1[n_bitwise_weights:], weights2[n_bitwise_weights:]):
weight *= w1 * w2
if twopoint_weights is not None:
sep_twopoint_weights = twopoint_weights.sep
twopoint_weights = twopoint_weights.weight
if all(x1 == x2 for x1, x2 in zip(xyz1, xyz2)): costheta = 1.
else: costheta = min(dotproduct_normalized(xyz1, xyz2), 1)
if (sep_twopoint_weights[0] < costheta <= sep_twopoint_weights[-1]):
            ind_costheta = np.searchsorted(sep_twopoint_weights, costheta, side='left', sorter=None)
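            # assumed completion (the source is truncated here): linearly
            # interpolate the angular weight between the bracketing separations
            frac = (costheta - sep_twopoint_weights[ind_costheta - 1]) \
                   / (sep_twopoint_weights[ind_costheta] - sep_twopoint_weights[ind_costheta - 1])
            weight *= (1. - frac) * twopoint_weights[ind_costheta - 1] + frac * twopoint_weights[ind_costheta]
    return weight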
from __future__ import annotations
import qimpy as qp
import numpy as np
import torch
from qimpy.utils import TaskDivision, BufferView
from typing import Tuple, Callable
IndicesType = Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
MethodFFT = Callable[["qp.grid.Grid", torch.Tensor], torch.Tensor]
FunctionFFT = Callable[[torch.Tensor], torch.Tensor]
def _init_grid_fft(self: qp.grid.Grid) -> None:
"""Initialize local or parallel FFTs for class Grid."""
# Half-reciprocal space global dimensions (for rfft/irfft):
self.shapeH = (self.shape[0], self.shape[1], 1 + self.shape[2] // 2)
qp.log.info(f"real-fft shape: {self.shapeH}")
# MPI division:
self.split0 = TaskDivision(
n_tot=self.shape[0], n_procs=self.n_procs, i_proc=self.i_proc
)
self.split2 = TaskDivision(
n_tot=self.shape[2], n_procs=self.n_procs, i_proc=self.i_proc
)
self.split2H = TaskDivision(
n_tot=self.shapeH[2], n_procs=self.n_procs, i_proc=self.i_proc
)
# Overall local grid dimensions:
self.shapeR_mine = (self.split0.n_mine, self.shape[1], self.shape[2])
self.shapeG_mine = (self.shape[0], self.shape[1], self.split2.n_mine)
self.shapeH_mine = (self.shape[0], self.shape[1], self.split2H.n_mine)
if self.n_procs > 1:
qp.log.info(f"split over {self.n_procs} processes:")
qp.log.info(f" local selected shape: {self.shapeR_mine}")
qp.log.info(f" local full-fft shape: {self.shapeG_mine}")
qp.log.info(f" local real-fft shape: {self.shapeH_mine}")
# Create 1D grids for real and reciprocal spaces:
# --- global versions first
iv1D = tuple(torch.arange(s, device=qp.rc.device) for s in self.shape)
iG1D = tuple(
torch.where(iv <= self.shape[dim] // 2, iv, iv - self.shape[dim])
for dim, iv in enumerate(iv1D)
)
# --- slice parts of Real, G-space and Half G-space for `get_mesh`:
self._mesh1D = { # Global versions:
"R": iv1D,
"G": iG1D,
"H": iG1D[:2] + (iG1D[2][: self.shapeH[2]],),
}
self._mesh1D_mine = { # Local versions:
"R": (iv1D[0][self.split0.i_start : self.split0.i_stop],) + iv1D[1:],
"G": iG1D[:2] + (iG1D[2][self.split2.i_start : self.split2.i_stop],),
"H": iG1D[:2] + (iG1D[2][self.split2H.i_start : self.split2H.i_stop],),
}
def get_indices(in_prev: np.ndarray, n_out_mine: int) -> IndicesType:
"""Get index arrays for unscrambling data after MPI rearrangement.
A common operation below is taking an array split along axis
'in' and doing an MPI all-to-all to split it along axis 'out'.
Before the MPI transfer, the array must be rearranged to bring
the out axis as dimension 0. After doing this, the array will
have dimensions n_out x (batch-dims) x n_inMine x S[1]. Note
that the middle spatial dimension S[1] is never split.
The differing chunk-size in all-to-all scrambles the result,
and this routine provides indices that put the data in the right
order to then view as (batch-dims) x n_out_mine x S[1] x n_in.
The results of this function should be linearly combined with 1,
i_batch and n_batch to get net indexes for a given batch size.
Parameters
----------
in_prev : numpy.array of ints
Cumulative counts of dimension split at input
n_out_mine : int
Local length of dimension split at output
Returns
-------
index_1 : torch.Tensor of ints
Coefficient of 1 in final index
index_i_batch : torch.Tensor of ints
Coefficient of i_batch in final index
index_n_batch : torch.Tensor of ints
Coefficient of n_batch in final index
"""
i_out_mine = np.arange(n_out_mine) # 1D index on out-split array
i_in = np.arange(in_prev[-1]) # 1D index on out-unified array
in_each = in_prev[1] - in_prev[0] # block size of input split
    in_counts = np.diff(in_prev)
"""
<NAME>
Date: June 16, 2021
functions for calculating Solar velocity corrections
and components for derivation of SDO/HMI RVs
"""
import datetime
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.coordinates import frames
from skimage.measure import label, regionprops
from SolAster.tools.settings import Parameters
def map_sequence(dates_list, time_range=datetime.timedelta(seconds=6), instrument=a.Instrument.aia,
wavelength=a.Wavelength(171 * u.angstrom)):
"""
function to query sunpy for images within certain time range of dates in dates list
user specified instrument and wavelength, otherwise default values: AIA 171A
Parameters
----------
dates_list: datetime, list
list of dates, either datetime or strings
time_range: datetime timedelta
plus/minus time range to search for images in comparison to desired timestamp
instrument: astropy inst
Sunpy instrument of choice (AIA, HMI, LASCO, EIT)
wavelength: astropy wvlth
desired wavelength of choice instrument
Returns
-------
maps: map
Sunpy map sequence object
"""
if isinstance(dates_list[0][0], str):
datetimes = [datetime.datetime.strptime(date[0], '%Y-%m-%dT%H:%M:%S') for date in dates_list]
else:
datetimes = dates_list
downloaded_files = []
for ind, datetime_object in enumerate(datetimes):
# pull image within specified time range
result = Fido.search(a.Time(str(datetime_object - time_range), str(datetime_object + time_range)),
instrument, wavelength)
# add file to list
downloaded_files.append(Fido.fetch(result))
# build map sequence from files
maps = sunpy.map.Map(downloaded_files, sequence=True)
return maps
def rel_positions(wij, nij, rij, smap):
"""
function to calculate pixel-wise relative positions in new coordinate frame
Parameters
----------
wij: float, array
array of westward values for image
nij: float, array
array of northward values for image
rij: float, array
array of radius values for image
smap: map
Sunpy map object
Returns
-------
deltaw: float, array
relative westward position of pixel
deltan: float, array
relative northward position of pixel
deltar: float, array
relative radial position of pixel
dij: float
distance between pixel ij and spacecraft
"""
# calculate relative positions of each pixel
rsc = smap.meta['dsun_obs'] / smap.meta['rsun_ref']
deltaw = wij
deltan = nij
deltar = rij - rsc
dij = np.sqrt(deltaw ** 2 + deltan ** 2 + deltar ** 2)
return deltaw, deltan, deltar, dij
def spacecraft_vel(deltaw, deltan, deltar, dij, vmap):
"""
function to calculate pixel-wise spacecraft velocities for Sunpy map
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
deltaw: float, array
relative westward position of pixel
deltan: float, array
relative northward position of pixel
deltar: float, array
relative radial position of pixel
dij: float
distance between pixel ij and spacecraft
vmap: map
Sunpy map object (Dopplergram)
Returns
-------
vsc: float, array
array of spacecraft velocities
"""
# velocity of spacecraft relative to sun
vscw = vmap.meta['obs_vw']
vscn = vmap.meta['obs_vn']
vscr = vmap.meta['obs_vr']
# pixel-wise magnitude of spacecraft velocity
vsc = - (deltaw * vscw + deltan * vscn + deltar * vscr) / dij
return vsc
def solar_rot_vel(wij, nij, rij, deltaw, deltan, deltar, dij, vmap, a_parameters=[Parameters.a1, Parameters.a2, Parameters.a3]):
"""
function to calculate pixel-wise velocities due to solar rotation
Based on Haywood et al. (2016) and described in Ervin et al. (2021) - In Prep.
Parameters
----------
wij: float, array
array of westward values for image
nij: float, array
array of northward values for image
rij: float, array
array of radius values for image
deltaw: float, array
relative westward position of pixel
deltan: float, array
relative northward position of pixel
deltar: float, array
relative radial position of pixel
dij: float
distance between pixel ij and spacecraft
vmap: map
Sunpy map object (Dopplergram)
a_parameters: float, array
array of solar differential rotation parameters from Snodgrass & Ulrich (1990).
Returns
-------
vrot: float, array
        array of solar rotation velocities
"""
# apply to cartesian coordinates
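    # rotate (w, n, r) by the observer's heliographic latitude B0 (crlt_obs)
    # about the westward axis, so that z1 aligns with the solar rotation axis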
x1 = wij
y1 = nij * np.cos(np.deg2rad(vmap.meta['crlt_obs'])) + rij * np.sin(np.deg2rad(vmap.meta['crlt_obs']))
    # completed from the rotation pattern of y1 above (the source is truncated here)
    z1 = - nij * np.sin(np.deg2rad(vmap.meta['crlt_obs'])) + rij * np.cos(np.deg2rad(vmap.meta['crlt_obs']))
#!/usr/bin/env python
# coding: utf-8
# # Multiple-Network Representation Learning
# ## Aliens and Humans
# Say you're a brain researcher, and you have a bunch of scans of brains - some are scans of people, and some are scans of aliens. You have some code that estimates networks from your scans, so you turn all your scans into networks. The nodes represent the brain regions which are common to both humans and aliens (isn't evolution amazing?), and the edges represent communication between these brain regions. You want to know if the human and alien networks share a common grouping of regions (your research topic is titled, "Do Alien Brains Have The Same Hemispheres That We Do?"). What do you do? How do you even deal with situations in which you have a lot of networks whose nodes all represent the same objects, but whose edges might come from totally different distributions?
#
# Well, if your goal is to find the shared grouping of regions between the human and alien networks, you could try embedding your networks and then seeing what those embeddings look like. This would serve the dual purpose of having less stuff to deal with and having some way to directly compare all of your networks in the same space. Finding an embedding is also simply useful in general, because embedding a network or group of networks opens the door to machine learning methods designed for tabular data.
#
# For example, say you have four alien networks and four human networks. Since alien brain networks aren't currently very accessible, we'll just simulate our human and alien networks with Stochastic Block Models. The communities that we're trying to group all of the brain regions into are the two hemispheres of the brain. We'll design the human brains to have strong connections within hemispheres, and we'll design the alien brains to have strong connections between hemispheres -- but the same regions still correspond to the same hemispheres.
#
# We'll use a relatively small number of nodes and fairly small block probabilities. You can see the specific parameters in the code below.
# In[1]:
import warnings
import numpy as np
warnings.filterwarnings("ignore")
np.random.seed(42)
get_ipython().run_line_magic('load_ext', 'autoreload')
# In[2]:
import numpy as np
from graspologic.simulations import sbm
# Generate networks from an SBM, given some parameters
def make_sbm(*probs, n=100, return_labels=False):
pa, pb, pc, pd = probs
P = np.array([[pa, pb],
[pc, pd]])
return sbm([n, n], P, return_labels=return_labels)
# make four human networks
# and four alien networks
p1, p2, p3 = .12, .06, .03
n = 100
labels = [0]*n + [1]*n
humans = [make_sbm(p1, p3, p3, p1, n=n) for i in range(4)]
aliens = [make_sbm(p3, p1, p1, p3, n=n) for i in range(4)]
# The human and alien networks come from very different distributions. As you can see from the Stochastic Block Model structure below, the regions in the human and the alien brains can both be separated into two communities. These communities represent the two hemispheres of the brain (who knew aliens also have bilateralized brains!). Although both humans and aliens have the same regions belonging to their respective hemispheres, as we planned, the alien networks have a strange property: their brain regions have more connections with regions in the opposite hemisphere than the same one.
# In[3]:
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import ImageGrid
from graspologic.plot import binary_heatmap, adjplot, heatmap
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
def lined_heatmap(data, binary=True, lines_every_n=None, alpha=.8, *args, **kwargs):
if binary:
ax = binary_heatmap(data, *args, **kwargs)
else:
ax = heatmap(data, *args, **kwargs)
if lines_every_n is None:
n = len(data) // 2
else:
n = lines_every_n
ax.vlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=alpha)
ax.hlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=alpha)
return ax
def add_legend(ax=None, legend_labels=["No Edge", "Edge"], colors=["white", "black"], bbox_to_anchor=(1.15, .5), **kwargs):
fig = plt.gcf()
if ax is None:
ax = plt.gca()
patches = []
for c, l in zip(colors, legend_labels):
patches.append(mpl.patches.Patch(facecolor=c, label=l, edgecolor="black"))
fig.legend(
patches,
legend_labels,
facecolor="white",
edgecolor="black",
framealpha=1,
fontsize="x-large",
loc="center right",
bbox_to_anchor=bbox_to_anchor,
**kwargs
)
fig = plt.figure(figsize=(14,7))
grid1 = ImageGrid(fig, 121, (2, 2), axes_pad=.1, share_all=True)
grid2 = ImageGrid(fig, 122, (2, 2), axes_pad=.1, share_all=True)
for i, (axi, axj) in enumerate(zip(grid1, grid2)):
hmn = lined_heatmap(humans[i], ax=axi, legend=False, outline=True)
hma = lined_heatmap(aliens[i], ax=axj, legend=False, outline=True)
grid1.axes_all[0].set_title("Human Brain Networks", fontsize=24, y=1.05, loc="left")
grid2.axes_all[0].set_title("Alien Brain Networks", fontsize=24, y=1.05, loc="left")
add_legend(grid2.axes_all[2])
plt.tight_layout(w_pad=3)
# ## Different ways to Embed the Networks
# Remember, our goal is to find community structure common to both humans and aliens, and in our case that community structure is the brain hemispheres. We're going to try to to embed our brain networks into some lower-dimensional space - that way, we can use standard clustering methods from machine learning to figure out which regions are grouped. Try to think about how you might find a lower-dimensional embedding where the location of each node's latent positions uses information from all of the networks.
# ### Averaging Separately
# The first idea you might come up with is to average your networks together, and then embed the result of that averaging with Spectral Embedding. It turns out that this is actually the right idea in the very special case where all of your networks come from the same probability distribution. In our case, we'll try averaging our groups of networks separately: we'll treat the human networks as one group, and the alien networks as another group, and we'll average each independently. In the end, we'll have two separate embeddings.
# In[4]:
from graspologic.embed import AdjacencySpectralEmbed as ASE
# Compute the average adjacency matrix for
# human brains and alien brains
human_mean_network = np.array(humans).mean(axis=0)
alien_mean_network = np.array(aliens).mean(axis=0)
# Embed both matrices
ase = ASE(n_components=2)
human_latents = ase.fit_transform(human_mean_network)
alien_latents = ase.fit_transform(alien_mean_network)
# Below, you can see what happens when we embed the averaged human and alien networks separately. Like all of our embedding plots, each dot represents the latent positions for a particular node.
# In[5]:
import seaborn as sns
def plot_latents(latent_positions, *, title=None, labels=None, ax=None, legend=True,
fontdict=None, **kwargs):
if ax is None:
ax = plt.gca()
plot = sns.scatterplot(latent_positions[:, 0], latent_positions[:, 1], hue=labels,
s=10, ax=ax, palette="Set1", color='k', **kwargs)
if title is not None:
plot.set_title(title, wrap=True, fontdict=fontdict, loc="left");
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
h, _ = plot.get_legend_handles_labels()
if legend and h:
ax.legend(title="Community")
elif not legend and np.any(labels):
ax.get_legend().remove()
return plot
# plot
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
plot_latents(human_latents, title="Embedding when we average the human \nnetworks", ax=axs[0]);
plot_latents(alien_latents, title="Embedding when we average the alien \nnetworks", ax=axs[1]);
# Both of these embeddings have clear clustering: there are two communities of nodes in both the human and the alien networks. We can recover the labels for these communities fairly easily using our unsupervised clustering method of choice. We know that the latent positions in each community of an Adjacency Spectral Embedding are normally distributed under this simulation setting, and we have two communities. That means that the above embeddings are distributed according to a Gaussian Mixture. Here, "Gaussian" just means "normal", and a Gaussian mixture just means that we have groups of normally distributed data clusters. As a result, it makes sense to cluster these data using scikit-learn's GaussianMixture implementation. We'll also use graspologic's `remap_labels` utility function to make sure that the node indices match between our predicted alien and human labels.
# In[6]:
from sklearn.mixture import GaussianMixture as GMM
from graspologic.utils import remap_labels
# Predict labels for the human and alien brains
human_labels = GMM(n_components=2).fit_predict(human_latents)
alien_labels = GMM(n_components=2).fit_predict(alien_latents)
# Make corresponding communities have the same values
alien_labels = remap_labels(human_labels, alien_labels)
# You can see a plot that predicts our community structure below. Success! When we embed the human and the alien networks separately, averaging them clearly lets us cluster the brain regions by hemisphere. However, as you can see, the colors are flipped: the communities are in different places relative to each other. This is because the alien networks are drawn from a different distribution than the human networks.
# In[7]:
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
plot_latents(human_latents, title="Clustering our averaged human network \nembedding with a GMM",
labels=human_labels, ax=axs[0], legend=False)
plot_latents(alien_latents, title="Clustering our averaged alien network \nembedding with a GMM",
labels=alien_labels, ax=axs[1], legend=False)
plt.legend(loc=(1.15, .4), fontsize="x-large", title="Community",
title_fontsize=16);
# ### Averaging Together
# But what if you wanted to embed *all* of the networks into the same space, both the human and the alien networks, so that there's only one plot? Let's try it. We'll take all of the networks and then average them together, and then do an Adjacency Spectral Embedding. This will result in a single plot, with each point representing a single brain region. Do you think we'll still find this nice community separation?
# In[8]:
total_mean_matrix = np.array(humans + aliens).mean(axis=0)
all_latents = ase.fit_transform(total_mean_matrix)
# In[9]:
plot_latents(all_latents, title="Embedding when we average everything together");
# Nope, bummer. Our community separation into discrete hemispheres is gone - the human networks and the alien networks cancelled each other out. As far as anybody can tell, our latent positions have just become meaningless noise, so we can't cluster and find communities like we did before.
# #### Why Did Averaging Together Fail?
# Why did this happen? Well, let's go back and compare one human brain network with one alien brain network.
# In[10]:
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
hmn = lined_heatmap(humans[0], ax=axs[0], legend=False, title="One Human Brain Network")
hma = lined_heatmap(aliens[0], ax=axs[1], legend=False, title="One Alien Brain Network")
add_legend(humans[0])
plt.tight_layout()
# The human network has more edges in the upper-left and lower-left quadrants of the heatmap. This implies that two regions in the same hemisphere are more likely to be connected for humans than two regions in opposite hemispheres.
#
# The alien network tells a different story. For aliens, two regions in opposite hemispheres are more likely to be connected than two regions in the same hemisphere.
#
# But what happens when you average these two adjacency matrices together?
# In[11]:
combined = np.array([humans[0], aliens[0]])
averaged = np.mean(combined, axis=0)
# In[12]:
from graspologic.plot import heatmap
# plot
fig, ax = plt.subplots()
cmap = plt.get_cmap('Greys', 3)
hm = heatmap(averaged, title="Averaged Brain Network", cbar=False, cmap=cmap, center=None, ax=ax);
sns.despine(ax=hm, top=False, bottom=False, left=False, right=False)
hm.vlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
hm.hlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
# # colorbar
add_legend(hm, legend_labels=["Edge in no networks", "Edge in one network", "Edge in both networks"],
colors=["white", "grey", "black"], bbox_to_anchor=(1.3, .5))
# cax = fig.add_axes([.8, 0.25, 0.05, 0.5])
# im = ax.imshow(averaged, cmap=cmap, vmin=-0.2, vmax=1.2)
# cbar = plt.colorbar(im, cax=cax, ticks=[0, .5, 1], )
# cbar.set_ticklabels(["Edge in no networks", "Edge in one network", "Edge in both networks"])
# By averaging, we've lost all of the community structure that used to exist. That's why our big averaged embedding failed.
#
# We've just discovered that even though it's often a great idea to simply average all of your networks together - for example, if they were drawn from the same distribution - it's often a horrible idea to average all of your networks if they might come from different distributions. This is a case of averaging networks which are "heterogeneous": Not only are your networks slightly different, but they're *expected* to be different because, again, they're drawn from different distributions. Sampling a lot of heterogeneous networks and then averaging them, as you can see from our exploration above, can result in losing the community signal you might have had.
#
# We'd like to find a way to compare these heterogeneous networks directly, so that we can embed all of our networks into the same space and still keep that nice community structure. Figuring out the best way to do this is a topic under active research, and the set of techniques and tools that have developed as a result are together called multiple-network representation learning.
# ## Different Types of Multiple-Network Representation Learning
# Let's take a moment to explore some of the possible general approaches we could take in multiple-network representation learning. At some point we need to combine the many individual representations of our networks into one, and there are at least three possible places where we could do this: combining the networks together, combining the networks separately, and combining the embeddings. Each of these eventually results in a latent position representation for our networks. It's important to note that in all of these approaches, we're simply learning representations for our groups of networks. You can do whatever you want with these representations; in our case, we'll illustrate that we can use them to classify our nodes.
# ### Combining the Networks Together
# With this approach, you'll start with a set of networks, and then you'll combine them all into a single network prior to doing anything else. You can then embed and classify this network directly. What we did before, averaging the human and alien networks, was an example of combining our networks -- we just averaged all of our adjacency matrices, and then we embedded the result.
# In[13]:
from graspologic.embed import MultipleASE as MASE
from graspologic.embed import OmnibusEmbed as OMNI
from graspologic.embed.omni import _get_omni_matrix
from graspologic.plot import heatmap
def rm_ticks(ax, x=False, y=False, **kwargs):
if x is not None:
ax.axes.xaxis.set_visible(x)
if y is not None:
ax.axes.yaxis.set_visible(y)
sns.despine(ax=ax, **kwargs)
fig = plt.figure();
# add stack of heatmaps
for i in range(4):
ax = fig.add_axes([.02*i, -.02*i, .8, .8])
ax = binary_heatmap(humans[i], ax=ax, legend=False)
if i == 0:
ax.set_title("Adjacency Matrices", loc="left", fontsize=16)
rm_ticks(ax, top=False, right=False)
ax.vlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
ax.hlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
# add arrow
arrow_ax = fig.add_axes([.8, .3, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add joint matrix
omni_ax = fig.add_axes([1, -.02*3, .8, .8])
A = human_mean_network.copy()
a_hm = heatmap(A, ax=omni_ax, cbar=False)
a_hm.set_title("Joint Matrix", loc="left", fontsize=16)
for _, spine in a_hm.spines.items():
spine.set_visible(True)
# add second arrow
arrow_ax = fig.add_axes([1.75, .3, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add averaged embedding
omni_embed_ax = fig.add_axes([2.1, -.02*3, .55, .8])
plot_latents(human_latents, ax=omni_embed_ax, title="Joint Embedding",
fontdict={'fontsize': 16})
rm_ticks(omni_embed_ax, top=False, right=False)
# add third arrow
arrow_ax = fig.add_axes([2.7, .3, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# classify
mase_ax = fig.add_axes([3.05, -.02*3, .55, .8])
plot_latents(human_latents, ax=mase_ax, title="Classification",
fontdict={'fontsize': 16}, labels=human_labels, legend=False);
# plt.suptitle("Combining the Networks", x=2, y=1.1, fontsize=26);
# ### Combining The Networks Separately
# The above approach is nice for collapsing our information into a single embedding -- with each point in our final embedding representing a single node of our network. However, there are situations in which we might want to keep our embeddings separate, but make sure that they're in the same latent space -- meaning, the embeddings aren't rotations of each other. That way, we can directly compare the embeddings of our separate embeddings.
# In[14]:
from graspologic.embed import MultipleASE as MASE
from graspologic.embed import OmnibusEmbed as OMNI
from graspologic.embed.omni import _get_omni_matrix
from graspologic.plot import heatmap
def rm_ticks(ax, x=False, y=False, **kwargs):
if x is not None:
ax.axes.xaxis.set_visible(x)
if y is not None:
ax.axes.yaxis.set_visible(y)
sns.despine(ax=ax, **kwargs)
fig = plt.figure();
# add stack of heatmaps
for i in range(4):
ax = fig.add_axes([.02*i, -.02*i, .8, .8])
ax = binary_heatmap(humans[i], ax=ax, legend=False)
if i == 0:
ax.set_title("Adjacency Matrices", loc="left", fontsize=16)
rm_ticks(ax, top=False, right=False)
ax.vlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
ax.hlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
# add arrow
arrow_ax = fig.add_axes([.8, .3, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add joint matrix
omni_ax = fig.add_axes([1, -.02*3, .8, .8])
A = _get_omni_matrix(humans[:2]+aliens[:2])
a_hm = heatmap(A, ax=omni_ax, cbar=False)
a_hm.set_title("Joint Matrix", loc="left", fontsize=16)
for _, spine in a_hm.spines.items():
spine.set_visible(True)
# add second arrow
arrow_ax = fig.add_axes([1.75, .3, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add omni embedding
latents_omni = OMNI(n_components=2).fit_transform(humans[:2]+aliens[:2])
for i, embedding in enumerate(latents_omni):
ax = fig.add_axes([2.1+.02*i, -.02*i, .55, .8])
if i == 0:
ax.set_title("Separate Combination", loc="left", fontsize=16)
plot = sns.scatterplot(embedding[:, 0], embedding[:, 1],
s=10, ax=ax, color="black")
rm_ticks(ax, top=False, right=False)
# ### Combining the embeddings
# The final approach to multiple-network representation learning that we'll talk about is combining the embeddings themselves. With this approach, you're waiting until you've already embedded all of your networks separately before you combine them, either with Adjacency Spectral Embedding or with some other single-network embedding method. Multiple Adjacency Spectral Embedding, which we'll be talking about soon, is an example of this approach.
# In[15]:
fig = plt.figure()
# add stack of heatmaps
for i in range(4):
ax = fig.add_axes([.02*i, -.02*i, .5, .5])
ax = binary_heatmap(humans[i], ax=ax, legend=False)
if i == 0:
ax.set_title("Adjacency Matrices", loc="right")
rm_ticks(ax, top=False, right=False)
ax.vlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
ax.hlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
# add arrow
arrow_ax = fig.add_axes([.5, .2, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add stack of latent plots
for i in range(4):
ax = fig.add_axes([.8+.02*i, -.02*i, .35, .5])
if i == 0:
ax.set_title("Separate Embeddings")
latents = ase.fit_transform(humans[i])
plot = sns.scatterplot(latents[:, 0], latents[:, 1],
s=10, ax=ax, color="black")
rm_ticks(ax, top=False, right=False)
# add second arrow
arrow_ax = fig.add_axes([1.25, .2, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add group embeddings
mase = MASE(n_components=2)
latents_mase = mase.fit_transform(humans + aliens)
mase_ax = fig.add_axes([1.57, -.03, .35, .5])
plot_latents(latents_mase, ax=mase_ax, title="Joint Embedding")
rm_ticks(mase_ax, top=False, right=False)
# add third arrow
arrow_ax = fig.add_axes([1.95, .2, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# classify
labels_normal = GMM(n_components=2).fit_predict(human_latents)
mase_ax = fig.add_axes([2.27, -.03, .35, .5])
plot_latents(latents_mase, ax=mase_ax, title="Classification",
labels=labels_normal, legend=False)
plt.suptitle("Combining the Embeddings", x=1.4, y=.7, fontsize=20);
# For the rest of this section, we'll explore the strengths and weaknesses of different particular techniques which use these approaches. The first we'll look at is combines the embeddings, like above. It's called Multiple Adjacency Spectral Embedding, or MASE for short.
# ## Multiple Adjacency Spectral Embedding
# MASE is a technique which combines embeddings by concatenating and re-embedding the separate latent positions into a single space. It's nice because you don't actually need each network to be generated from the same distribution - you only need the nodes of the different networks to be aligned and for them to belong to the same communities.
#
# MASE is probably the easiest to understand if you know how Adjacency Spectral Embeddings work. Say you have some number of networks, and (like we said above) their nodes are aligned. The goal of MASE is to embed the networks into a single space, with each point in that space representing a single node - but, unlike simply averaging, MASE lets you combine networks which aren't necessarily drawn from the same distribution. MASE is based on the common subspace independent-edge (COSIE) model from the multi-network models section of chapter 5, so we're operating under the assumption that there *is* some low-dimensional space common to all of our networks that we can embed into in the first place.
#
# Let's go back to our group of human and alien brains and try using MASE to embed them rather than averaging. Then, we'll dive deeper into what's going on under the hood. First, we'll instantiate a MASE classifier and embed down to two dimensions. Then we'll create a combined list of the human and alien brains, and use MASE to find the latent positions.
# In[16]:
from graspologic.embed import MultipleASE as MASE
# Use MASE to embed everything
mase = MASE(n_components=2)
latents_mase = mase.fit_transform(humans + aliens)
# In[17]:
plot_latents(latents_mase,
title="Embedding when we use MASE on the group \nof all human and alien networks",
labels=labels);
# Unlike the disastrous results from simply averaging all of our networks together, MASE manages to keep the community structure that we found when we averaged our networks separately. Let's see what's under the hood.
# ### How Does MASE Work?
# Below, you can see how MASE works. We start with networks, drawn as nodes in space connected to each other. We turn them into adjacency matrices, and then we embed the adjacency matrices of a bunch of networks separately, using our standard Adjacency Spectral Embedding algorithm. Then, we take all of those embeddings, concatenate them horizontally into a single matrix, and embed the entire concatenated matrix. The colors are the true communities each node belongs to: there's a red and an orange community. MASE is an unsupervised learning technique and so it doesn't need any information about the true communities to embed, but they're useful to see.
# ```{figure} ../../Images/mase1.jpeg
# ---
# height: 400px
# name: mase-fig
# ---
# The MASE algorithm
# ```
# #### A Collection of Networks
# We'll illustrate what's happening in the MASE algorithm by running through all of its steps ourselves, with a set of example networks.
#
# Suppose we have a set of networks generated from Stochastic Block Models with two communities in each network. The networks have aligned nodes -- meaning that the $i_{th}$ row of each of their adjacency matrices represents the edges for the same node $i$. The nodes also all belong to the same communities. However, edge probabilities might change depending on the network. In the first network, you might have nodes in the same community having a high chance of connecting to each other, whereas in the second network, nodes are much more likely to be connected to other nodes in different communities. You want to end up with a classification that distinctly groups the nodes into their respective communities, using the information from all of the networks. Because MASE takes the approach of combining the embeddings, we start by embedding each network separately with an Adjacency Spectral Embedding.
#
# Below is Python code which generates four networks with Stochastic Block Models. Each of the networks is drawn from a different distribution (the block probability matrices are different), but the labels are the same across the networks (which means that nodes have a consistent community no matter which network you're looking at). If you're interested in the particular parameters used to generate these SBMs, you can see them in the code below.
# In[18]:
import numpy as np
from graspologic.simulations import sbm
n = 100
p1, p2, p3 = .12, .06, .03
A1, labels = make_sbm(p1, p3, p3, p1,
return_labels=True)
A2 = make_sbm(p1, p3, p3, p2)
A3 = make_sbm(p3, p2, p2, p3)
A4 = make_sbm(p1, p3, p3, p3)
networks = [A1, A2, A3, A4]
# In[19]:
fig, axs = plt.subplots(2, 2, figsize=(7,7))
for i, (ax, graph) in enumerate(zip(axs.flat, networks)):
hmap = binary_heatmap(graph, ax=ax, legend=False, title=f"network {i+1}")
for spine in ax.spines.values():
spine.set_visible(True)
hmap.vlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
hmap.hlines(n, 0, n*2, colors="black", lw=.9, linestyle="dashed", alpha=.8)
plt.suptitle("Four different networks", fontsize=26, y=1)
fig.subplots_adjust(hspace=.05, wspace=.05)
add_legend(A1, bbox_to_anchor=(1.2, .5))
plt.tight_layout()
# #### Embedding our networks
# Next, we embed each of the four networks separately using Adjacency Spectral Embedding. This step is pretty straightforward, so we won't dive into it too much: remember, we're combining the embeddings, not the networks, so we're not doing anything fancy. The Python code below just groups the four networks into a list, and then loops through the list, embedding each network into two dimensions and saving the resulting embeddings into a variable.
# In[20]:
from graspologic.embed import AdjacencySpectralEmbed as ASE
networks = [A1, A2, A3, A4]
latents_mase = []
for network in networks:
ase = ASE(n_components=2)
latent = ase.fit_transform(network)
latents_mase.append(latent)
# In[21]:
fig, axs = plt.subplots(2, 2, figsize=(7,7), sharex=True, sharey=True)
for i, ax in enumerate(axs.flat):
plot_latents(latents_mase[i], title=f"Embedding for network {i+1}",
labels=labels, ax=ax, legend=False)
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
plt.suptitle("Adjacency Spectral Embedding for our four networks", fontsize=20);
h, l = ax.get_legend_handles_labels()
fig.legend(h, l, loc='center right', bbox_to_anchor=(1.25, .5),
prop={'size': 15}, title="Community", title_fontsize='x-large');
fig.supxlabel("Dimension 1")
fig.supylabel("Dimension 2");
plt.tight_layout()
# It's important to keep in mind that these embeddings don't live in the same *latent space*. What this means is that averaging these embeddings together would result in essentially meaningless noise. This is because of the rotational invariance of latent positions: you can only recover the latent positions of any network up to a rotation.
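# If you're curious, you can check this directly: averaging the four separate
# embeddings mixes four differently-rotated coordinate systems, which scrambles
# the community structure (a quick sanity check, not part of the original pipeline).

# In[ ]:

naive_average = np.mean(np.array(latents_mase), axis=0)
plot_latents(naive_average, labels=labels,
             title="Naively averaging the four separate embeddings");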
# #### Combining our embeddings
# Now comes the interesting part: our goal is to find a reasonable way to take each of these individual embeddings and combine them.
#
# We can visualize each of our four embeddings a different way. Instead of using the two latent position dimensions as the x-axis and the y-axis of our plot, we can just visualize our latent position matrices directly. Each latent position now corresponds to a row in one of these matrices. The two columns are the two latent position dimensions, and the two colors in each row correspond to the latent position values. We're essentially substituting location for color.
# In[22]:
import matplotlib.cm as cm
from matplotlib.colors import Normalize
cmap = 'rocket_r'
fig, axs = plt.subplots(ncols=4, figsize=(16, 8), sharex=True, sharey=True)
for i, ax in enumerate(axs.flat):
hm = sns.heatmap(latents_mase[i], cmap=cmap,
ax=ax, yticklabels=50, cbar=False)
hm.set_title(f"Embedding for network {i+1}", fontdict={'fontsize': 10})
fig.supxlabel("Dimension", x=.42, fontsize=16)
fig.supylabel("Latent Position", x=.005, fontsize=16)
fig.suptitle("Latent position matrices for our four embeddings", x=.42, fontsize=20)
fig.tight_layout(w_pad=2)
vmin, vmax = np.array(latents_mase).min(), np.array(latents_mase).max()
norm = Normalize(vmin=vmin, vmax=vmax)
im = cm.ScalarMappable(cmap=cmap, norm=norm)
fig.colorbar(im, ax=axs);
# Because the rows of these matrices are all aligned - meaning, row 0 corresponds to node 0 for all four matrices - we can actually think of each node as having (in this case) eight latent position dimensions: two for each of our four networks. Eight is a somewhat arbitrary number here: each network contributes two dimensions simply because we originally chose to embed all of our networks down to two dimensions with ASE, and the number of networks is of course even more arbitrary. You'll usually have more than four.
#
# In the more general sense, we can think of each node as having $m \times d$ latent position dimensions, where $m$ is the number of networks, and $d$ is the number of dimensions we embed each network into. We don't actually need separate matrices to express this idea: the natural thing to do would be to just concatenate all of the matrices horizontally into a single $n \times (m \times d)$ matrix, with one row per node.
# In[23]:
# Concatenate our four matrices horizontally into a single n by (m*d) matrix
concatenated = np.hstack(latents_mase)
# In[24]:
fig, ax = plt.subplots(figsize=(16, 8))
hm = sns.heatmap(concatenated, cmap=cmap, ax=ax, yticklabels=50);
hm.set_title(f"Combined embedding for all four networks", fontdict={'fontsize': 20});
hm.set_xlabel("Dimension", fontsize=16)
hm.set_ylabel("Latent Position", fontsize=16);
# #### Embedding our Combination To Create a Joint Embedding
# So now we have a combined representation for our separate embeddings, but we have a new problem: our latent positions suddenly have way too many dimensions. In this example they have eight (the number of columns in our combined matrix), but remember that in general we'd have $m \times d$. This somewhat defeats the purpose of an embedding: we took a bunch of high-dimensional objects and turned them all into a single high-dimensional object. Big whoop. We can't see what our combined embedding looks like in Euclidean space, unless we can somehow visualize $m \times d$ dimensional space (hint: we can't). We'd like to just have `d` dimensions - that was the whole point of using `d` components for each of our Adjacency Spectral Embeddings in the first place!
#
# There's an obvious solution here: why don't we just embed *again*? Nothing stops us from doing a Singular Value Decomposition on a nonsquare matrix, and so we can just create a joint embedding of our combined matrix and go back down to a healthy $d$ columns.
# In[25]:
from graspologic.embed import selectSVD
joint_embedding, *_ = selectSVD(concatenated, n_components=2)
# In[26]:
from matplotlib.gridspec import GridSpec
# TODO: add legend
fig = plt.figure(figsize=(12, 8))
gs = GridSpec(1, 3)
axm = fig.add_subplot(gs[0])
axs = fig.add_subplot(gs[1:])
# Matrix representation
hm = sns.heatmap(joint_embedding, cmap=cmap, ax=axm, yticklabels=50, cbar=True,
cbar_kws={"shrink": .91})
hm.set_title(f"Matrix visualization of our \nJoint Embedding", fontdict={'fontsize': 14}, loc='left')
hm.set_xlabel("Dimension", fontdict={"fontsize": 14})
hm.set_ylabel("Latent Positions", fontdict={"fontsize": 14})
# Euclidean representation
splot = sns.scatterplot(joint_embedding[:, 0], joint_embedding[:, 1], ax=axs, hue=labels,
palette="Set1", edgecolor=None, s=10)
splot.set_xlabel("Dimension 0", fontdict={"fontsize": 14})
splot.set_ylabel("Dimension 1", fontdict={"fontsize": 14})
splot.set_title("Euclidean visualization of our Joint Embedding", loc="left", fontdict={"fontsize": 14})
h, l = splot.get_legend_handles_labels()
splot.legend(title='Community', handles=h, labels=["a", "b"])
# fig title
plt.suptitle("Two Visualizations For Our Joint Embedding", fontsize=20)
plt.tight_layout()
# Looks like this idea worked well - our nodes are clearly grouped into two distinct communities, even though our networks were each drawn from different distributions! To reiterate, what we did was:
# 1. Embed each of our four networks separately into two-dimensional space
# 2. Think of all of the resulting latent positions for a particular node as a single vector
# 3. With the intuition from 2, horizontally concatenate our four latent position matrices into a single matrix
# 4. Embed that new matrix down to 2 dimensions
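# Putting it all together, here's a minimal hand-rolled sketch of those four steps as a single function (it reuses the ASE and selectSVD imports from above; this is our own consolidation, not graspologic's implementation):

# In[ ]:

def mase_by_hand(networks, n_components=2):
    # Step 1: embed each network separately
    separate = [ASE(n_components=n_components).fit_transform(A) for A in networks]
    # Steps 2-3: treat each node's latent positions across networks as one long
    # vector by concatenating the latent position matrices horizontally
    combined = np.hstack(separate)
    # Step 4: re-embed the combined matrix back down to n_components dimensions
    joint, *_ = selectSVD(combined, n_components=n_components)
    return joint

# Up to sign flips and solver randomness, mase_by_hand(networks) reproduces the
# joint embedding we built above.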
# ### Using Graspologic
# In practice, you don't actually have to implement any of this stuff yourself. Graspologic's MultipleASE class implements it all for you under the hood. You can see the embedding below - you give MultipleASE a list of networks, and it spits out a set of joint latent positions. Graspologic's implementation of MASE is doing pretty much exactly what we just did: it embeds all of the networks you pass in, concatenates them horizontally, and then re-embeds the concatenated matrix. You can see this in the figure -- MASE's embedding looks just like the one we made above.
# In[27]:
from graspologic.embed import MultipleASE as MASE
mase = MASE(n_components=2)
latents = mase.fit_transform(networks)
# In[28]:
plot_latents(latents, title="MASE embedding", labels=labels);
# ### Score Matrices*
# Exactly how is the joint embedding we created related to all of the separate, original networks? Well, to understand this, we need to introduce the concept of *score matrices*.
#
# In MASE, each network is associated with its own score matrix. Just like the joint embedding describes how the networks are similar, the score matrices describe how each network is different.
#
# Suppose we have a set of networks with adjacency matrices $A^{(1)}, ..., A^{(m)}$, with each network being unweighted. In the joint embedding we made before, for instance, we had $m=4$.
#
# Now, we run MASE using the method described above, and we get a joint embedding $V$. Then each adjacency matrix, $A^{(i)}$, can be approximately decomposed into $VR^{(i)} V^\top$, where $R^{(i)}$ is the score matrix corresponding to the $i_{th}$ network:
#
# \begin{align*}
# A^{(i)} = VR^{(i)} V^\top
# \end{align*}
#
# This is how the score matrix of a particular network $R^{(i)}$ and the single joint embedding $V$ are related to the original network $A^{(i)}$.
# #### Finding Score Matrices
# Any particular score matrix, $R^{(i)}$, is square and $d \times d$. The dimension, $d$, corresponds to the number of embedding dimensions -- so if we wanted to embed down to two dimensions, each $R^{(i)}$ would be a $2 \times 2$ matrix.
#
# Now, here's the interesting part: how do we find our score matrices? Well, there's a theorem in linear algebra about matrices which are *orthogonal*, meaning that their columns are all perpendicular to each other and have unit length. This theorem says that the inverse of an orthogonal matrix is its transpose. So, for an orthogonal matrix $O$,
#
# \begin{align*}
# O^\top = O^{-1}
# \end{align*}
#
# Interestingly, the column-vectors of our joint embedding matrix (let's call it $V$) are all perpendicular. Since, by definition, two vectors are perpendicular exactly when their dot product is 0, we can check this below:
# In[29]:
V = joint_embedding.copy()
# Take the dot product of the columns of our joint latent position matrix
np.round(V[:, 0] @ V[:, 1])
# Since the columns aren't just perpendicular but also have unit length, what this all means is that $V^\top V$ is just the identity matrix $I$.
# In[30]:
np.round(V.T@V)
# and so, finally, we can use the above two facts to find the score matrix for a particular network. We just take our original formula $A^{(i)} = VR^{(i)} V^\top$, left-multiply by $V^\top$, and right-multiply by $V$.
#
# \begin{align*}
# A^{(i)} &= VR^{(i)} V^\top \\
# V^{\top} A^{(i)} V &= (V^\top V) R^{(i)} (V^\top V) \\
# V^\top A^{(i)} V &= R^{(i)}
# \end{align*}
#
# Below, we turn the list of four networks we already embedded into a 3D numpy array, and then do the above multiplication to get a new 3D numpy array of score matrices. Because we embedded into two dimensions, each score matrix is $2 \times 2$, and the four score matrices are "slices" along the 0th axis of the numpy array.
# In[31]:
networks_array = np.asarray(networks)
scores = V.T @ networks_array @ V
scores.shape
# Now, here's something interesting: it turns out that we can estimate the edge probability matrix which generated any graph with $ P^{(i)} = V R^{(i)} V^\top$.
# In[32]:
P_0 = V @ scores[0] @ V.T
# Below and to the left, you can see the original adjacency matrix for the first network. In the center, you can see the heatmap for the first network's score matrix. Next to it, you can see the recreation of the first network. Remember that we only used the score matrix and the joint embedding to recreate it. The first network has a block probability matrix of
#
# \begin{align}
# \begin{bmatrix}
# .12 & .03 \\
# .03 & .12 \\
# \end{bmatrix}
# \end{align}
#
# and so we should expect the nodes within each of the two diagonal blocks of our adjacency matrix to be densely connected, and the two off-diagonal blocks to be sparsely connected.
# In[33]:
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
lined_heatmap(networks[0], title="The original adjacency matrix \nfor the first network",
ax=axs[0], legend=False)
lined_heatmap(scores[0], binary=False, cbar=False, title="Score matrix for the first network", ax=axs[1])
lined_heatmap(P_0, binary=False, cbar=False, title="Estimated edge probabilities \nfor the first network", ax=axs[2])
plt.tight_layout()
# So we've learned that MASE is useful when you want a joint embedding that combines all of your networks together, and when you want to estimate edge probabilities for one of your networks. What if we wanted to keep our separate embeddings, but put them all in the same space? That's what the Omnibus Embedding gives us, and it's what we'll explore now.
# ## Omnibus Embedding
# The Omnibus Embedding combines the networks themselves, rather than their embeddings, to put every network's latent positions into the same latent space. What this means is that the embeddings for each network after the omnibus embedding are *directly comparable*: none of the embeddings are rotations of each other, and distances between nodes across embeddings actually mean something. You can use the omnibus embedding to answer a variety of questions about the interacting properties of a collection of networks. For example, you could figure out which nodes or subgraphs are responsible for similarities or differences across your networks, or you could determine whether subcommunities in your networks are statistically similar or different. You could try to figure out which underlying parameters of your network are the same, and which are different.
#
# In the next section, we'll explore how the Omnibus Embedding works. Sections in future chapters will explore some of the things you can do with your separate embeddings to learn about your networks.
# ### OMNI on our four networks
# We'll begin with an example. Let's go back to the four networks we created in the MASE section and look at their embeddings. Notice that they're all *rotations* of each other - this is because of the nonidentifiability problem in spectral embeddings.
# ```{admonition} Non-Identifiability
# Let's take a network generated from an RDPG with $n$ nodes. Each of these $n$ nodes is associated with a latent position vector, corresponding to that node's row in the network's embedding. What it means for a node to have a latent position vector is that the probability for an edge to exist between two nodes $i$ and $j$ is the dot product of their latent position vectors.
#
# More specifically, if $\textbf{P}$ is a matrix of edge probabilities, and $\textbf{X}$ is our latent position matrix, then $\textbf{P} = \textbf{X} \textbf{X}^\top$.
#
# The nonidentifiability problem is as follows: Take any orthogonal matrix (a matrix which only rotates or flips other matrices). Call it $\textbf{W}$. By definition, the transpose of any orthogonal matrix is its inverse -- $\textbf{W} \textbf{W}^\top = \textbf{I}$, where $\textbf{I}$ is the identity matrix. So,
#
# \begin{align}
# \textbf{P} &= \textbf{X} \textbf{X}^\top \\
# &= \textbf{X} \textbf{I} \textbf{X}^\top \\
# &= (\textbf{X} \textbf{W}) (\textbf{W}^\top \textbf{X}^\top) \\
# &= (\textbf{X} \textbf{W}) (\textbf{X} \textbf{W})^\top
# \end{align}
#
# What this means is that you can take any latent position matrix and rotate it, and the rotated version will still create the same matrix of edge probabilities. So, when you try to estimate latent positions, separate estimations will potentially produce rotated versions of each other.
#
# This is very bad in situations where you're trying to directly compare more than one embedding. You wouldn't be able to figure out the average position of a node, for instance, when you have multiple embeddings of that node.
# ```
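# We can also sanity-check the algebra above numerically: rotating a random
# latent position matrix by a random orthogonal matrix leaves the edge
# probability matrix unchanged (a quick illustration, not part of the original text).

# In[ ]:

rng = np.random.default_rng(42)
X = rng.uniform(size=(10, 2))                 # a small latent position matrix
W, _ = np.linalg.qr(rng.normal(size=(2, 2)))  # a random orthogonal matrix
np.allclose(X @ X.T, (X @ W) @ (X @ W).T)     # True: P is unchanged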
# You can see the nonidentifiability problem in action below. The embeddings for network 1 and for network 2 are particularly illustrative; community 0 is generally on top in network 1, but on the right in network 2. There isn't a way to compare any two nodes directly. Another way to say this is that, right now, all of our embeddings live in different *latent spaces*: direct comparison between embeddings for nodes in network 1 and nodes in network 2 isn't possible. We've also drawn the latent position corresponding to the first node as a big circle in each network so that you can track a single point.
# In[34]:
fig, axs = plt.subplots(2, 2, figsize=(7,7), sharex=True, sharey=True)
for i, ax in enumerate(axs.flat):
plot_latents(latents_mase[i], title=f"Embedding for network {i+1}",
labels=labels, ax=ax, legend=False)
_x, _y = np.array(latents_mase)[i, 0]
ax.plot(_x, _y, 'ro', markersize=10, linewidth=1, markeredgecolor='k')
ax.yaxis.set_major_locator(plt.MaxNLocator(3))
plt.suptitle("Adjacency Spectral Embedding for our four networks", fontsize=20);
h, l = ax.get_legend_handles_labels()
fig.legend(h, l, loc='center right', bbox_to_anchor=(1.25, .5),
prop={'size': 15}, title="Community", title_fontsize='x-large');
fig.supxlabel("Dimension 1")
fig.supylabel("Dimension 2");
plt.tight_layout()
# ### OMNI on our four heterogeneous networks
# Let's see what happens when, instead of embedding our networks separately as above, we find their latent positions with an Omnibus Embedding. Again, we'll plot a particular node with a circle so that we can track it across embeddings.
# In[35]:
from graspologic.embed import OmnibusEmbed
omni = OmnibusEmbed(n_components=2)
latents_omni = omni.fit_transform(networks)
# In[36]:
fig, axs = plt.subplots(2, 2, figsize=(7,7), sharex=True, sharey=True)
for i, ax in enumerate(axs.flat):
plot_latents(latents_omni[i], title=f"OMNI Embedding for network {i+1}",
labels=labels, ax=ax, legend=False)
_x, _y = latents_omni[i, 0]
ax.plot(_x, _y, 'ro', markersize=10, linewidth=1, markeredgecolor='k')
plt.suptitle("Omnibus Embedding for our four networks", fontsize=20);
h, l = ax.get_legend_handles_labels()
fig.legend(h, l, loc='center right', bbox_to_anchor=(1.25, .5),
prop={'size': 15}, title="Community", title_fontsize='x-large');
fig.supxlabel("Dimension 1")
fig.supylabel("Dimension 2");
plt.tight_layout()
# As you can see, unlike when we embedded the four networks separately, the clusters created by the Omnibus Embedding *live in the same space*: you don't have to rotate or flip your points to line them up across embeddings. The cluster of blue points is always in the top left, and the cluster of red points is always in the bottom right.
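# Because the embeddings are aligned, per-node comparisons across networks are
# now meaningful. For instance, we can measure how far each node's latent
# position moved between network 1 and network 2 (a small illustrative check):

# In[ ]:

node_shifts = np.linalg.norm(latents_omni[0] - latents_omni[1], axis=1)
node_shifts[:5]  # distances for the first five nodes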
# ### How Does OMNI work?
# At a high level, the omnibus embedding is fairly simple. It:
# 1. Combines the adjacency matrices for all of our networks into a single, giant matrix (the Omnibus Matrix)
# 2. Embeds that matrix using a standard Adjacency or Laplacian Spectral Embedding.
#
# The omnibus matrix itself just has every original adjacency or Laplacian matrix along its diagonal, and the elementwise average of every pair of original matrices on the off-diagonals. This means that the Omnibus Matrix is *huge*: if you have $m$ networks, each of which has $n$ nodes, the Omnibus Matrix will be an $mn \times mn$ matrix.
#
# For example, say we only have two networks. Let's name their adjacency matrices $A^{(1)}$ and $A^{(2)}$. Then, the omnibus matrix looks like this:
#
# \begin{align}
# \begin{bmatrix}
# A^{(1)} & \frac{A^{(1)} + A^{(2)}}{2} \\
# \frac{A^{(2)} + A^{(1)}}{2} & A^{(2)} \\
# \end{bmatrix}
# \end{align}
#
# where each entry on the diagonal is itself a matrix. In general, when we have $m$ networks, the $i_{th}$ diagonal entry is $A^{(i)}$ and the $(i, j)_{th}$ entry is $\frac{A^{(i)} + A^{(j)}}{2}$. What this means is that you just stick each of your adjacency matrices on the diagonal of a large matrix, and you fill in the off-diagonals with the averages of each pair of two adjacency matrices.
#
# You can see this in the code below, where we just use numpy's block function to generate our simple Omnibus Matrix from two networks.
# In[37]:
a0, a1 = networks[0], networks[1]
omni = np.block([[a0, (a0+a1)/2],
[(a1+a0)/2, a1]])
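# The same recipe generalizes to any number of networks. Since a matrix is its
# own average with itself, one nested list comprehension covers the diagonal
# and the off-diagonals at once (a minimal hand-rolled sketch; below we'll use
# graspologic's internal helper instead):

# In[ ]:

def omni_matrix(networks):
    return np.block([[(Ai + Aj) / 2 for Aj in networks] for Ai in networks])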
# Below you can see the resulting Omnibus Matrix. The first and second networks are shown as heatmaps on the left, and their Omnibus Matrix is shown on the right.
# In[38]:
fig = plt.figure(figsize=(12, 8))
gs = GridSpec(2, 3)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[1, 0])
ax_omni = fig.add_subplot(gs[:, 1:])
# first two
cmap = list(np.array(sns.color_palette("PuOr_r", 3, ))[[1, 2, 0]])
for i, (ax, data) in enumerate(zip([ax0, ax1], [a0, a1])):
title = r"First network ($A_1$)" if i==0 else r"Second network ($A_2$)"
hm = lined_heatmap(data, ax=ax, legend=False, title=title,
colors=[cmap[0], cmap[2]])
# big one
hm = lined_heatmap(omni, ax=ax_omni, binary=False, cmap=cmap,
title="Omnibus Matrix for first \nand second network",
cbar=False, center=None)
# outline
sns.despine(ax=ax_omni, top=False, bottom=False, left=False, right=False)
# separating lines
hm.vlines(len(omni)//2, 0, len(omni), colors="black", lw=.9, alpha=1)
hm.hlines(len(omni)//2, 0, len(omni), colors="black", lw=.9, alpha=1)
for i in [.25, .75]:
hm.vlines(len(omni)*i, 0, len(omni), colors="black", lw=.9, linestyle="dashed", alpha=.6)
hm.hlines(len(omni)*i, 0, len(omni), colors="black", lw=.9, linestyle="dashed", alpha=.6)
# text
def text(label, x, y):
ax = plt.gca()
left, width, bottom, height = .25, .5, .25, .5
right = left + width
top = bottom + height
t = ax.text(x * (left + right), y * (bottom + top), label,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes, size=32, bbox=dict(facecolor="white", edgecolor="none", alpha=.5))
text(r"$A_1$", .25, .75)
text(r"$A_2$", .75, .25)
text(r"$\frac{(A_2 + A_1)}{2}$", .25, .25)
text(r"$\frac{(A_1 + A_2)}{2}$", .75, .75)
# legend
omni_labels = np.unique(omni)
add_legend(legend_labels=omni_labels, colors=cmap)
plt.tight_layout()
# #### Creating the Omnibus Matrix For All Four Networks
# Here's the Omnibus Matrix for all four of our networks. You can see adjacency matrices for the original four networks on the diagonal blocks, highlighted in blue, and the averages of all possible pairs of adjacency matrices on the off-diagonal blocks, highlighted in orange.
# In[39]:
from graspologic.embed.omni import _get_omni_matrix
omni = _get_omni_matrix(networks)
cmap = list(np.array(sns.color_palette("PuOr_r", 3))[[1, 2, 0]])
hm = lined_heatmap(omni, binary=False, cmap=cmap, cbar=False,
title="Full omnibus matrix for all four networks", center=None, alpha=0)
sns.despine(ax=hm, top=False, bottom=False, left=False, right=False)
for i in np.arange(4):
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import cv2
import numpy as np
import gripit.edgelib.util as util
global window_size
global buffer_zone
def vertical_line(line):
    # Flag the line as (mostly) vertical and return four window corners offset
    # horizontally (perpendicular to the line) by window_size + buffer_zone.
    line[11] = 1
    # [y ; x-ts]
return [line[0] - buffer_zone, line[1] - window_size - buffer_zone], \
[line[0] + buffer_zone, line[1] + window_size + buffer_zone], \
[line[2] - buffer_zone, line[3] - window_size - buffer_zone], \
[line[2] + buffer_zone, line[3] + window_size + buffer_zone]
def horizontal_line(line):
    # Flag the line as (mostly) horizontal and return four window corners offset
    # vertically (perpendicular to the line) by window_size + buffer_zone.
    line[11] = 2
    # [y-ts ; x]
return [line[0] - window_size - buffer_zone, line[1] - buffer_zone], \
[line[0] + window_size + buffer_zone, line[1] + buffer_zone], \
[line[2] - window_size - buffer_zone, line[3] - buffer_zone], \
[line[2] + window_size + buffer_zone, line[3] + buffer_zone]
def get_orientation(line):
startpt = [line[0], line[1]]
endpt = [line[2], line[3]]
dy = abs(line[0] - line[2])
dx = abs(line[1] - line[3])
    if dy >= dx:
pt1, pt2, pt3, pt4 = vertical_line(line)
else:
pt1, pt2, pt3, pt4 = horizontal_line(line)
return pt1, pt2, pt3, pt4, startpt, endpt
def create_windows(pt1, pt2, pt3, pt4, startpt, endpt):
    # Compare the two possible pairings of the window corners and pick the
    # ordering with the larger midpoint separation, so that win_p and win_n
    # end up on opposite sides of the line segment.
    temp1 = np.linalg.norm(np.subtract((np.add(pt1, pt3) / 2.0), (np.add(pt2, pt4) / 2.0)))
    temp2 = np.linalg.norm(np.subtract((np.add(pt1, pt4) / 2.0), (np.add(pt2, pt3) / 2.0)))
if temp1 > temp2:
win_p = [startpt, endpt, pt4, pt2]
win_n = [pt1, pt3, endpt, startpt]
else:
win_p = [startpt, pt4, endpt, pt2]
win_n = [pt1, endpt, pt3, startpt]
return win_p, win_n
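# Example usage (illustrative; ``window_size`` and ``buffer_zone`` are this
# module's globals and must be assigned before these functions are called, and
# the line format assumed here is [y1, x1, y2, x2, ..., flag at index 11]):
#
#     import gripit.edgelib.line_windows as lw   # hypothetical module name
#     lw.window_size, lw.buffer_zone = 5, 2
#     line = [10, 10, 50, 12] + [0] * 8
#     pt1, pt2, pt3, pt4, start, end = lw.get_orientation(line)
#     win_p, win_n = lw.create_windows(pt1, pt2, pt3, pt4, start, end)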
def roipoly(src, poly):
    mask = np.zeros_like(src, dtype=np.uint8)
"""Mobject representing curly braces."""
__all__ = ["Brace", "BraceLabel", "BraceText", "BraceBetweenPoints"]
import numpy as np
from ...animation.composition import AnimationGroup
from ...animation.fading import FadeIn
from ...animation.growing import GrowFromCenter
from ...constants import *
from ...mobject.geometry import Line
from ...mobject.svg.svg_path import SVGPathMobject
from ...mobject.svg.tex_mobject import MathTex, Tex
from ...mobject.types.vectorized_mobject import VMobject
from ...utils.color import BLACK
class Brace(SVGPathMobject):
"""Takes a mobject and draws a brace adjacent to it.
Passing a direction vector determines the direction from which the
brace is drawn. By default it is drawn from below.
Parameters
----------
mobject : :class:`~.Mobject`
The mobject adjacent to which the brace is placed.
direction : Optional[Union[:class:`list`, :class:`numpy.array`]]
The direction from which the brace faces the mobject.
See Also
--------
:class:`BraceBetweenPoints`
Examples
--------
.. manim:: BraceExample
:save_last_frame:
class BraceExample(Scene):
def construct(self):
s = Square()
self.add(s)
for i in np.linspace(0.1,1.0,4):
br = Brace(s, sharpness=i)
t = Text(f"sharpness= {i}").next_to(br, RIGHT)
self.add(t)
self.add(br)
VGroup(*self.mobjects).arrange(DOWN, buff=0.2)
"""
def __init__(
self,
mobject,
direction=DOWN,
buff=0.2,
sharpness=2,
stroke_width=0,
fill_opacity=1.0,
background_stroke_width=0,
background_stroke_color=BLACK,
**kwargs
):
path_string_template = "m0.01216 0c-0.01152 0-0.01216 6.103e-4 -0.01216 0.01311v0.007762c0.06776 0.122 0.1799 0.1455 0.2307 0.1455h{0}c0.03046 3.899e-4 0.07964 0.00449 0.1246 0.02636 0.0537 0.02695 0.07418 0.05816 0.08648 0.07769 0.001562 0.002538 0.004539 0.002563 0.01098 0.002563 0.006444-2e-8 0.009421-2.47e-5 0.01098-0.002563 0.0123-0.01953 0.03278-0.05074 0.08648-0.07769 0.04491-0.02187 0.09409-0.02597 0.1246-0.02636h{0}c0.05077 0 0.1629-0.02346 0.2307-0.1455v-0.007762c-1.78e-6 -0.0125-6.365e-4 -0.01311-0.01216-0.01311-0.006444-3.919e-8 -0.009348 2.448e-5 -0.01091 0.002563-0.0123 0.01953-0.03278 0.05074-0.08648 0.07769-0.04491 0.02187-0.09416 0.02597-0.1246 0.02636h{1}c-0.04786 0-0.1502 0.02094-0.2185 0.1256-0.06833-0.1046-0.1706-0.1256-0.2185-0.1256h{1}c-0.03046-3.899e-4 -0.07972-0.004491-0.1246-0.02636-0.0537-0.02695-0.07418-0.05816-0.08648-0.07769-0.001562-0.002538-0.004467-0.002563-0.01091-0.002563z"
default_min_width = 0.90552
self.buff = buff
angle = -np.arctan2(*direction[:2]) + np.pi
mobject.rotate(-angle, about_point=ORIGIN)
left = mobject.get_corner(DOWN + LEFT)
right = mobject.get_corner(DOWN + RIGHT)
target_width = right[0] - left[0]
linear_section_length = max(
0, (target_width * sharpness - default_min_width) / 2
)
path = path_string_template.format(
linear_section_length, -linear_section_length
)
SVGPathMobject.__init__(
self,
path_string=path,
stroke_width=stroke_width,
fill_opacity=fill_opacity,
background_stroke_width=background_stroke_width,
background_stroke_color=background_stroke_color,
**kwargs
)
self.stretch_to_fit_width(target_width)
self.shift(left - self.get_corner(UP + LEFT) + self.buff * DOWN)
for mob in mobject, self:
mob.rotate(angle, about_point=ORIGIN)
def put_at_tip(self, mob, use_next_to=True, **kwargs):
if use_next_to:
mob.next_to(self.get_tip(), np.round(self.get_direction()), **kwargs)
else:
mob.move_to(self.get_tip())
buff = kwargs.get("buff", DEFAULT_MOBJECT_TO_MOBJECT_BUFFER)
shift_distance = mob.width / 2.0 + buff
mob.shift(self.get_direction() * shift_distance)
return self
def get_text(self, *text, **kwargs):
text_mob = Tex(*text)
self.put_at_tip(text_mob, **kwargs)
return text_mob
def get_tex(self, *tex, **kwargs):
tex_mob = MathTex(*tex)
self.put_at_tip(tex_mob, **kwargs)
return tex_mob
def get_tip(self):
# Returns the position of the seventh point in the path, which is the tip.
return self.points[28] # = 7*4
def get_direction(self):
vect = self.get_tip() - self.get_center()
return vect / np.linalg.norm(vect)
class BraceLabel(VMobject):
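    """A :class:`Brace` placed adjacent to ``obj``, with a label at its tip.

    ``text`` is rendered with ``label_constructor`` (:class:`MathTex` by
    default) and scaled by ``label_scale``.
    """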
def __init__(
self,
obj,
text,
brace_direction=DOWN,
label_constructor=MathTex,
label_scale=1,
**kwargs
):
self.label_constructor = label_constructor
self.label_scale = label_scale
VMobject.__init__(self, **kwargs)
self.brace_direction = brace_direction
if isinstance(obj, list):
obj = VMobject(*obj)
self.brace = Brace(obj, brace_direction, **kwargs)
        if isinstance(text, (tuple, list)):
self.label = self.label_constructor(*text, **kwargs)
else:
self.label = self.label_constructor(str(text))
if self.label_scale != 1:
self.label.scale(self.label_scale)
self.brace.put_at_tip(self.label)
self.submobjects = [self.brace, self.label]
def creation_anim(self, label_anim=FadeIn, brace_anim=GrowFromCenter):
return AnimationGroup(brace_anim(self.brace), label_anim(self.label))
def shift_brace(self, obj, **kwargs):
if isinstance(obj, list):
obj = VMobject(*obj)
self.brace = Brace(obj, self.brace_direction, **kwargs)
self.brace.put_at_tip(self.label)
self.submobjects[0] = self.brace
return self
def change_label(self, *text, **kwargs):
self.label = self.label_constructor(*text, **kwargs)
if self.label_scale != 1:
self.label.scale(self.label_scale)
self.brace.put_at_tip(self.label)
self.submobjects[1] = self.label
return self
def change_brace_label(self, obj, *text):
self.shift_brace(obj)
self.change_label(*text)
return self
class BraceText(BraceLabel):
def __init__(self, obj, text, label_constructor=Tex, **kwargs):
super().__init__(obj, text, label_constructor=label_constructor, **kwargs)
class BraceBetweenPoints(Brace):
"""Similar to Brace, but instead of taking a mobject it uses 2
points to place the brace.
A fitting direction for the brace is
computed, but it still can be manually overridden.
If the points go from left to right, the brace is drawn from below.
Swapping the points places the brace on the opposite side.
Parameters
----------
point_1 : Union[:class:`list`, :class:`numpy.array`]
The first point.
point_2 : Union[:class:`list`, :class:`numpy.array`]
The second point.
direction : Optional[Union[:class:`list`, :class:`numpy.array`]]
The direction from which the brace faces towards the points.
Examples
--------
.. manim:: BraceBPExample
class BraceBPExample(Scene):
def construct(self):
p1 = [0,0,0]
p2 = [1,2,0]
brace = BraceBetweenPoints(p1,p2)
self.play(Create(NumberPlane()))
self.play(Create(brace))
self.wait(2)
"""
def __init__(self, point_1, point_2, direction=ORIGIN, **kwargs):
if all(direction == ORIGIN):
        line_vector = np.array(point_2) - np.array(point_1)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
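# Example usage (illustrative): for the centrosymmetric group P -1 defined
# above, the symmetry-equivalent reflections of (h, k, l) are (h, k, l) itself
# and the inversion (-h, -k, -l), both with unit phase factors:
#
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls -> [[1, 2, 3], [-1, -2, -3]], phases -> [1.+0.j, 1.+0.j]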
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
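# Note: the centred groups store some translations unreduced, e.g. the
# (1/2, 1/2, 1) and (1, 1/2, 1/2) entries above, which appear to be the
# body-centring vector (1/2, 1/2, 1/2) added to the base translations;
# modulo a lattice translation they are equivalent to values inside [0, 1).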
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
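# The denominators of 4 above encode the quarter-cell translations of the
# diamond (d) glides of F d d 2, e.g. trans_num/trans_den = (1/4, 1/4, 1/4),
# repeated once for each of the four F-centring vectors.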
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
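# The ':2' suffix marks origin choice 2 of the International Tables
# (origin on an inversion centre).  Negative numerators such as
# (0, -1, -1)/(1, 2, 2) = (0, -1/2, -1/2) belong to the inverse operations
# and are equivalent to (0, 1/2, 1/2) modulo a lattice translation.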
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
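# Hedged usage sketch: collect the distinct symmetry-equivalent positions
# of a fractional coordinate under a transformation list such as the ones
# built above.  expand_orbit is hypothetical and only illustrative; it
# assumes N is numpy, as in the surrounding code.
def expand_orbit(transformations, frac_xyz):
    seen = []
    for rot, trans_num, trans_den in transformations:
        # Apply each operation and wrap into the unit cell before comparing.
        pos = (N.dot(rot, frac_xyz) + trans_num.astype(float) / trans_den) % 1.0
        if not any(N.allclose(pos, p) for p in seen):
            seen.append(pos)
    return seen
# At this point `transformations` still holds the P n m a list, so a
# general position yields its 8 equivalents:
# expand_orbit(transformations, N.array([0.1, 0.2, 0.3]))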
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
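# The ':2' suffix in the Hermann-Mauguin symbol marks origin choice 2 of the
# International Tables (origin placed on a centre of inversion); each group is
# registered under both its number and its symbol.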
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
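# F (all-face-centred) lattice: the eight point-group operators above recur
# three more times with the centring translations (0,1/2,1/2), (1/2,0,1/2)
# and (1/2,1/2,0), for 32 entries in total.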
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
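# The d (diamond) glides of F d d d appear as quarter-cell translations:
# hence the denominators of 4 in the trans_den arrays above.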
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
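# I (body-centred) lattice: the eight operators recur once more with the
# centring translation (1/2,1/2,1/2), for 16 entries in total.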
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
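# End of the orthorhombic space groups (numbers 16-74); the tetragonal
# groups (numbers 75-142) begin here.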
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
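# --- Illustration only (not part of the generated table): a minimal sketch,
# assuming the numpy-compatible `N` used throughout this module, of applying
# one (rot, trans_num, trans_den) entry to a point in fractional coordinates.
# The translation of an operator is trans_num/trans_den, taken componentwise.
def _apply_operator(rot, trans_num, trans_den, point):
    # rotate first, then add the fractional translation (promoted to float)
    return N.dot(rot, point) + 1.0 * trans_num / trans_den

# For P 4 above, the second operator maps (x, y, z) to (-y, x, z):
#     rot, trans_num, trans_den = transformations[1]
#     _apply_operator(rot, trans_num, trans_den, N.array([0.1, 0.2, 0.3]))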
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
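# P 41, P 42 and P 43 differ only in the screw translation that the four-fold
# operator carries along c: c/4, c/2 and 3c/4 respectively. P 41 and P 43
# form an enantiomorphic pair.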
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
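# --- Illustration only (an assumption, not from the generated table): the
# full set of symmetry-equivalent fractional positions of a site follows by
# applying every operator of a group and wrapping back into the unit cell.
def _equivalent_positions(transformations, point):
    positions = []
    for rot, trans_num, trans_den in transformations:
        pos = N.dot(rot, point) + 1.0 * trans_num / trans_den
        positions.append(pos % 1.0)  # wrap each coordinate into [0, 1)
    return positions

# For the body-centred I 4 above this yields 8 positions for a general site:
# 4 from the point-group rotations, each repeated at +(1/2, 1/2, 1/2).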
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
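# P -4: the generator (x, y, z) -> (y, -x, -z) is the four-fold rotoinversion;
# applying it twice gives the two-fold rotation listed last.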
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
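# Beginning with number 89 (P 4 2 2), the groups add two-fold axes
# perpendicular to c, e.g. (x, y, z) -> (x, -y, -z) and the diagonal
# (x, y, z) -> (y, x, -z).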
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
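# Body-centred (I) groups list each operation twice: once as generated, and
# once with the centring vector (1/2, 1/2, 1/2) added to its translation.
# The sums are stored un-reduced, which is why numerators such as 5/4 appear
# in the groups below; modulo the lattice they are equivalent to 1/4.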
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
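# Note that all eight operations of 'P 4 m m' carry zero translation: the
# group is symmorphic (pure point-group operations, no glides or screws),
# unlike the screw-axis groups such as 'P 41 2 2' above.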
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
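# The '-4' family starts from roto-inversion generators: rotation parts such
# as [0,1,0,-1,0,0,0,0,-1] have determinant -1 (improper operations), in
# contrast to the proper rotations (determinant +1) of the '4 2 2' groups.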
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
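
# Space group 123 (P 4/m m m)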
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
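
# Space group 124 (P 4/m c c)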
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
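
# Space group 125 (P 4/n b m, origin choice 2)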
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
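
# Space group 126 (P 4/n n c, origin choice 2)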
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
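
# Space group 127 (P 4/m b m)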
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
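
# Space group 128 (P 4/m n c)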
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
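
# Space group 129 (P 4/n m m, origin choice 2)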
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
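
# Space group 130 (P 4/n c c, origin choice 2)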
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
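
# Space group 131 (P 42/m m c)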
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
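
# Space group 132 (P 42/m c m)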
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
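
# Space group 133 (P 42/n b c, origin choice 2)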
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
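
# Space group 134 (P 42/n n m, origin choice 2)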
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
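
# Space group 135 (P 42/m b c)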
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
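
# Space group 136 (P 42/m n m)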
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
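
# Space group 137 (P 42/n m c, origin choice 2)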
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
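
# Space group 138 (P 42/n c m, origin choice 2)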
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
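
# Space group 139 (I 4/m m m)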
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
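
# Space group 140 (I 4/m c m) -- as in the other I-centred groups in this
# table, the second half of the list repeats the primitive operations with
# the (1/2, 1/2, 1/2) centring translation added, left unnormalised.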
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
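# --- illustrative helper (not part of the generated tables) ---
# Each entry in a transformations list is a tuple
# (rot, trans_num, trans_den): an integer rotation matrix plus the
# numerator and denominator arrays of an exact fractional translation.
# A minimal sketch of applying one operator to a fractional coordinate,
# assuming N is a NumPy-compatible module (the alias this file already
# uses):
def _apply_symop(symop, point):
    """Map a fractional coordinate through (rot, trans_num, trans_den)."""
    rot, trans_num, trans_den = symop
    # x' = R.x + t, with t formed as the exact fraction num/den
    return N.dot(rot, point) + (1.0 * trans_num) / trans_den
# e.g. applying the identity operator (the first entry of each list) to
# N.array([0.1, 0.2, 0.3]) returns the point unchanged:
#   _apply_symop(transformations[0], N.array([0.1, 0.2, 0.3]))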
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
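# Note: the ':2' suffix in the symbol marks the origin-choice-2 setting
# (origin on an inversion centre), analogous to the ':H' suffix used
# further below for the hexagonal setting of the rhombohedral groups.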
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
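# Space groups 143-167 below belong to the trigonal system.  In the
# hexagonal basis used here the threefold rotation is the integer matrix
# [[0,-1,0],[1,-1,0],[0,0,1]], which appears flattened in the N.array
# calls that follow.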
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
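# Each group is registered under both its number and its Hermann-Mauguin
# symbol, so either key retrieves the same object:
#   space_groups[143] is space_groups['P 3']   # -> True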
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
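# P 31 (No. 144) and P 32 (No. 145) form an enantiomorphic screw pair:
# the 3_1 axis advances by c/3 per rotation step, the 3_2 axis by 2c/3,
# which is why their (0,0,1)/3 and (0,0,2)/3 translations are swapped.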
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
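# R 3 :H lists each of its three point operations three times, once for
# every rhombohedral centring translation in the hexagonal setting:
# (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3), i.e. 9 operators in total.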
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
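# P 3 1 2 (No. 149) and P 3 2 1 (No. 150) share the same threefold
# operations; they differ only in how the twofold axes are oriented with
# respect to the hexagonal a-axes, hence the different twofold matrices.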
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
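# P 3 c 1 (No. 158) and P 3 1 c (No. 159) repeat the mirror orientations
# of P 3 m 1 and P 3 1 m with an added (0,0,1/2) glide translation: the
# rotation parts are identical, only trans_num/trans_den change.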
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
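# In R 3 c :H the (0,0,1/2) glide component is added on top of the
# centring vectors, which is where the unreduced z-components 7/6
# (= 2/3 + 1/2) and 5/6 (= 1/3 + 1/2) above come from.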
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
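# R -3 m :H: the 12 point-group operations of -3m, each repeated for the
# three centring translations, give 36 operators in total.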
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
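# The screw-axis groups that follow (P 61, P 65, P 62, P 64, P 63)
# pair each rotation with a fractional translation along c, visible
# below as trans_num/trans_den values of 1/6, 1/3, 1/2, 2/3 and 5/6.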
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
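# P 61 and P 65 form an enantiomorphic pair (as do P 62/P 64 and the
# corresponding 622 groups further down): their screw translations are
# mirror images of each other, 1/6 vs 5/6 and 1/3 vs 2/3.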
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
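# P -6 is generated by the -6 roto-inversion: it contains the
# three-fold rotations and their products with the horizontal mirror
# (rot = diag(1,1,-1)), but no pure six-fold axis.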
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
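# Every group is registered twice: under its International Tables
# number and under its Hermann-Mauguin symbol, so for example
# space_groups[175] and space_groups['P 6/m'] refer to the same
# SpaceGroup instance.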
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
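# Illustrative sanity check (an editorial sketch, not part of the
# generated table): the rotation part of every operation should be an
# integer matrix with determinant +1 (proper) or -1 (improper).
def _det3(m):
    # cofactor expansion of a 3x3 determinant
    return (m[0,0]*(m[1,1]*m[2,2] - m[1,2]*m[2,1])
            - m[0,1]*(m[1,0]*m[2,2] - m[1,2]*m[2,0])
            + m[0,2]*(m[1,0]*m[2,1] - m[1,1]*m[2,0]))

def _check_rotations(ops):
    for rot, trans_num, trans_den in ops:
        assert _det3(rot) in (-1, 1)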
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
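# In the hexagonal mm-type groups that follow (P 6 m m, P 6 c c,
# P 63 c m, P 63 m c), the first six operations are rotations and the
# last six are reflections; depending on the setting, a reflection may
# carry a 0,0,1/2 glide translation (compare P 6 m m with P 6 c c).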
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
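# P 6/m m m is the full hexagonal holohedry: the twelve operations of
# P 6 2 2 followed by their products with the inversion
# (rot = -identity), 24 operations in all.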
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
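# Space group 194 (P 63/m m c)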
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
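# Space group 195 (P 2 3)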
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
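# Space group 196 (F 2 3): the twelve point operations are repeated with the
# F-centering translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).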
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
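# Space group 197 (I 2 3): the twelve point operations are repeated with the
# body-centering translation (1/2,1/2,1/2).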
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
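# Space group 198 (P 21 3)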
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
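# Space group 199 (I 21 3): the operations of P 21 3 plus their body-centered
# copies; translations are kept as generated, without reduction modulo 1
# (e.g. num=[1,1,1], den=[2,1,2] -> (1/2, 1, 1/2)).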
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
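# Space group 200 (P m -3)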
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
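# Space group 201 (P n -3, origin choice 2); note the negative translation
# components such as (-1/2, -1/2, 0), kept as generated.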
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
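# Space group 202 (F m -3): the 24 point operations repeated with the three
# F-centering translations.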
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
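# Space group 203 (F d -3, origin choice 2) -- label inferred from the
# numbering sequence and the quarter-cell (denominator-4) translations below.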
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
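# A minimal usage sketch, not part of the generated table: each
# (rot, trans_num, trans_den) tuple collected above is assumed to act on
# fractional coordinates as x' = rot . x + trans_num/trans_den (mod 1),
# the usual crystallographic convention for these integer-encoded
# operators. The helper name `apply_symmetry_op` is illustrative only;
# `N` is the array module already imported by this file.
def apply_symmetry_op(op, frac_coords):
    rot, trans_num, trans_den = op
    # Elementwise quotient yields the fractional translation vector.
    shift = trans_num * 1.0 / trans_den
    # Rotate, translate, then wrap the result back into the unit cell.
    return (N.dot(rot, frac_coords) + shift) % 1.0

# Example: images = [apply_symmetry_op(op, N.array([0.1, 0.2, 0.3]))
#                    for op in transformations]

# Space group 204 (I m -3): operations assembled below.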
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
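# Space group 205 (P a -3): operations assembled below.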
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
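# Space group 206 (I a -3): operations assembled below.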
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
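# Space group 207 (P 4 3 2): operations assembled below.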
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
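# Space group 208 (P 42 3 2): operations assembled below.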
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
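# Space group 209 (F 4 3 2): operations assembled below.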
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
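# Illustrative note (not part of the original table): each entry of
# `transformations` is a (rot, trans_num, trans_den) triple, i.e. a 3x3
# integer rotation matrix plus a rational translation trans_num/trans_den.
# Applying one operation to a fractional coordinate `p` would look like:
#     q = N.dot(rot, p) + trans_num.astype(float) / trans_den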
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
################################################################################
#
# Copyright (c) 2017 University of Oxford
# Authors:
# <NAME> (<EMAIL>)
#
# This work is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
################################################################################
import bisect
import csv
import numpy as np
import numpy.matlib as ml
from math import sin, cos, atan2, sqrt
MATRIX_MATCH_TOLERANCE = 1e-4
def build_se3_transform(xyzrpy):
"""Creates an SE3 transform from translation and Euler angles.
Args:
xyzrpy (list[float]): translation and Euler angles for transform. Must have six components.
Returns:
numpy.matrixlib.defmatrix.matrix: SE3 homogeneous transformation matrix
Raises:
ValueError: if `len(xyzrpy) != 6`
"""
if len(xyzrpy) != 6:
raise ValueError("Must supply 6 values to build transform")
se3 = ml.identity(4)
se3[0:3, 0:3] = euler_to_so3(xyzrpy[3:6])
se3[0:3, 3] = np.matrix(xyzrpy[0:3]).transpose()
return se3
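# Usage sketch (values assumed for illustration):
#     pose = build_se3_transform([1.0, 2.0, 0.5, 0.0, 0.0, 1.57])
#     # `pose` is a 4x4 matrix holding euler_to_so3([0.0, 0.0, 1.57]) in its
#     # top-left 3x3 block and the translation [1.0, 2.0, 0.5] in its last column.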
def euler_to_so3(rpy):
"""Converts Euler angles to an SO3 rotation matrix.
Args:
rpy (list[float]): Euler angles (in radians). Must have three components.
Returns:
numpy.matrixlib.defmatrix.matrix: 3x3 SO3 rotation matrix
Raises:
ValueError: if `len(rpy) != 3`.
"""
if len(rpy) != 3:
raise ValueError("Euler angles must have three components")
R_x = np.matrix([[1, 0, 0],
[0, cos(rpy[0]), -sin(rpy[0])],
[0, sin(rpy[0]), cos(rpy[0])]])
R_y = np.matrix([[cos(rpy[1]), 0, sin(rpy[1])],
[0, 1, 0],
[-sin(rpy[1]), 0, cos(rpy[1])]])
R_z = np.matrix([[cos(rpy[2]), -sin(rpy[2]), 0],
[sin(rpy[2]), cos(rpy[2]), 0],
[0, 0, 1]])
R_zyx = R_z * R_y * R_x
return R_zyx
def so3_to_euler(so3):
"""Converts an SO3 rotation matrix to Euler angles
Args:
so3: 3x3 rotation matrix
Returns:
numpy.matrixlib.defmatrix.matrix: list of Euler angles (size 3)
Raises:
ValueError: if so3 is not 3x3
ValueError: if a valid Euler parametrisation cannot be found
"""
if so3.shape != (3, 3):
raise ValueError("SO3 matrix must be 3x3")
roll = atan2(so3[2, 1], so3[2, 2])
yaw = atan2(so3[1, 0], so3[0, 0])
denom = sqrt(so3[0, 0] ** 2 + so3[1, 0] ** 2)
pitch_poss = [atan2(-so3[2, 0], denom), atan2(-so3[2, 0], -denom)]
R = euler_to_so3((roll, pitch_poss[0], yaw))
if abs(so3 - R).sum() < MATRIX_MATCH_TOLERANCE:  # compare absolute error so sign cancellation cannot mask a mismatch
return np.matrix([roll, pitch_poss[0], yaw])
else:
R = euler_to_so3((roll, pitch_poss[1], yaw))
if abs(so3 - R).sum() > MATRIX_MATCH_TOLERANCE:
raise ValueError("Could not find valid pitch angle")
return np.matrix([roll, pitch_poss[1], yaw])
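# Round-trip sketch (illustrative only): for a non-degenerate pitch,
# so3_to_euler inverts euler_to_so3 up to MATRIX_MATCH_TOLERANCE:
#     rpy = so3_to_euler(euler_to_so3([0.1, 0.2, 0.3]))
#     # rpy is approximately matrix([[0.1, 0.2, 0.3]])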
def so3_to_quaternion(so3):
"""Converts an SO3 rotation matrix to a quaternion
Args:
so3: 3x3 rotation matrix
Returns:
numpy.ndarray: quaternion [w, x, y, z]
Raises:
ValueError: if so3 is not 3x3
"""
if so3.shape != (3, 3):
raise ValueError("SO3 matrix must be 3x3")
R_xx = so3[0, 0]
R_xy = so3[0, 1]
R_xz = so3[0, 2]
R_yx = so3[1, 0]
R_yy = so3[1, 1]
R_yz = so3[1, 2]
R_zx = so3[2, 0]
R_zy = so3[2, 1]
R_zz = so3[2, 2]
try:
w = sqrt(so3.trace() + 1) / 2
except ValueError:
# w is non-real
w = 0
# Due to limited numerical precision, the value passed to `sqrt` below may be a
# small negative number on the order of 1e-15. To avoid a math domain error we
# clip these values to a minimum of 0.
x = sqrt(max(1 + R_xx - R_yy - R_zz, 0)) / 2
y = sqrt(max(1 + R_yy - R_xx - R_zz, 0)) / 2
z = sqrt(max(1 + R_zz - R_yy - R_xx, 0)) / 2
max_index = max(range(4), key=[w, x, y, z].__getitem__)
if max_index == 0:
x = (R_zy - R_yz) / (4 * w)
y = (R_xz - R_zx) / (4 * w)
z = (R_yx - R_xy) / (4 * w)
elif max_index == 1:
w = (R_zy - R_yz) / (4 * x)
y = (R_xy + R_yx) / (4 * x)
z = (R_zx + R_xz) / (4 * x)
elif max_index == 2:
w = (R_xz - R_zx) / (4 * y)
x = (R_xy + R_yx) / (4 * y)
z = (R_yz + R_zy) / (4 * y)
elif max_index == 3:
w = (R_yx - R_xy) / (4 * z)
x = (R_zx + R_xz) / (4 * z)
y = (R_yz + R_zy) / (4 * z)
return np.array([w, x, y, z])
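# Illustrative check (not part of the original file): the identity rotation
# maps to the unit quaternion:
#     so3_to_quaternion(np.identity(3))  # -> array([1., 0., 0., 0.])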
def interpolate_ins_poses(ins_path, pose_timestamps, origin_timestamp, use_rtk=False):
"""Interpolate poses from INS.
Args:
ins_path (str): path to file containing poses from INS.
pose_timestamps (list[int]): UNIX timestamps at which interpolated poses are required.
origin_timestamp (int): UNIX timestamp of origin frame. Poses will be reported relative to this frame.
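use_rtk (bool, optional): if True, interpolate RTK-corrected poses rather than raw INS poses (description assumed from the parameter name).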
Returns:
list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix representing interpolated pose for each requested timestamp.
"""
with open(ins_path) as ins_file:
ins_reader = csv.reader(ins_file)
headers = next(ins_file)
ins_timestamps = [0]
abs_poses = [ml.identity(4)]
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard
import paddle
class TestConcatOp(OpTest):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.dtype = self.get_dtype()
self.init_test_data()
self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
self.attrs = {'axis': self.axis}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
self.outputs = {
'Out':
np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
}
def get_dtype(self):
return "float64"
def test_check_output(self):
if self.dtype == np.uint16:
place = core.CUDAPlace(0)
self.check_output_with_place(place)
else:
self.check_output(check_eager=True)
def test_check_grad(self):
if self.dtype == np.uint16:
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['x0'], 'Out')
self.check_grad_with_place(place, ['x1'], 'Out')
self.check_grad_with_place(place, ['x2'], 'Out')
else:
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
def init_test_data(self):
if self.dtype == np.uint16:
x0 = np.random.random((5, 1, 4, 5)).astype(np.float32)
self.x0 = convert_float_to_uint16(x0)
x1 = np.random.random((5, 2, 4, 5)).astype(np.float32)
self.x1 = convert_float_to_uint16(x1)
x2 = np.random.random((5, 3, 4, 5)).astype(np.float32)
self.x2 = convert_float_to_uint16(x2)
else:
self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
self.axis = 1
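# Worked example (illustrative): with the shapes above, concatenating
# (5, 1, 4, 5), (5, 2, 4, 5) and (5, 3, 4, 5) along axis=1 yields a
# (5, 6, 4, 5) array, matching the np.concatenate call used for 'Out'.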
class TestConcatOp2(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.axis = 1
@skip_check_grad_ci(
reason="The function 'check_grad' for large inputs is too slow.")
class TestConcatOp3(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
self.axis = 1
def test_check_grad(self):
pass
@skip_check_grad_ci(
reason=
"This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
)
class TestConcatOp4(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
self.axis = 0
def test_check_grad(self):
pass
class TestConcatOp5(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
self.axis = -3
class TestConcatOp6(TestConcatOp):
def setUp(self):
self.op_type = "concat"
self.dtype = self.get_dtype()
self.python_api = paddle.concat
self.init_test_data()
self.lod = [[20, 80]]
self.out_lod = [[20, 80, 20, 80, 20, 80]]
self.inputs = {
'X': [('x0', (self.x0, self.lod)), ('x1', (self.x1, self.lod)),
('x2', (self.x2, self.lod))]
}
self.attrs = {'axis': self.axis}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
out = np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
self.outputs = {'Out': (out, self.out_lod)}
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
def init_test_data(self):
self.x0 = np.random.random([100]).astype(self.dtype)
self.x1 = np.random.random([100]).astype(self.dtype)
self.x2 = np.random.random([100]).astype(self.dtype)
self.axis = 0
def create_test_AxisTensor(parent):
class TestConcatAxisTensor(parent):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.dtype = self.get_dtype()
self.init_test_data()
self.inputs = {
'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)],
'AxisTensor': np.array([self.axis]).astype("int32")
}
self.attrs = {}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
self.outputs = {
'Out':
np.concatenate((self.x0, self.x1, self.x2),
axis=self.actual_axis)
}
cls_name = "{0}_{1}".format(parent.__name__, "AxisTensor")
TestConcatAxisTensor.__name__ = cls_name
globals()[cls_name] = TestConcatAxisTensor
create_test_AxisTensor(TestConcatOp)
create_test_AxisTensor(TestConcatOp2)
create_test_AxisTensor(TestConcatOp3)
create_test_AxisTensor(TestConcatOp4)
create_test_AxisTensor(TestConcatOp5)
create_test_AxisTensor(TestConcatOp6)
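# Note (illustrative): create_test_AxisTensor builds a subclass per parent and
# injects it into the module globals, so the calls above register classes such
# as "TestConcatOp_AxisTensor" that unittest discovery picks up. The Fp16/Bf16
# factories below follow the same pattern.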
#----------------Concat Fp16----------------
def create_test_fp16(parent):
class TestConcatFp16(parent):
def get_dtype(self):
return np.float16
cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
TestConcatFp16.__name__ = cls_name
globals()[cls_name] = TestConcatFp16
create_test_fp16(TestConcatOp)
create_test_fp16(TestConcatOp2)
create_test_fp16(TestConcatOp3)
create_test_fp16(TestConcatOp4)
create_test_fp16(TestConcatOp5)
create_test_fp16(TestConcatOp6)
#----------------Concat Bf16----------------
def create_test_bf16(parent):
@unittest.skipIf(not paddle.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestConcatBf16(parent):
def get_dtype(self):
return np.uint16
cls_name = "{0}_{1}".format(parent.__name__, "Bf16")
TestConcatBf16.__name__ = cls_name
globals()[cls_name] = TestConcatBf16
create_test_bf16(TestConcatOp)
class TestConcatOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of concat_op should be list.
x1 = fluid.layers.data(shape=[4], dtype='int32', name='x1')
fluid.layers.concat(x1)
# The item in input must be Variable.
x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
fluid.CPUPlace())
x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.concat, [x2])
# The input dtype of concat_op must be float16, float32, float64, int32, int64.
x4 = fluid.layers.data(shape=[4], dtype='uint8', name='x4')
x5 = fluid.layers.data(shape=[4], dtype='uint8', name='x5')
self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7')
x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8')
fluid.layers.concat([x6, x7])
# The type of axis in concat_op should be int or Variable.
def test_axis_type():
fluid.layers.concat([x6, x7], 3.2)
self.assertRaises(TypeError, test_axis_type)
def test_input_same_dtype():
fluid.layers.concat([x7, x8])
self.assertRaises(TypeError, test_input_same_dtype)
class TestConcatAPI(unittest.TestCase):
def test_fluid_api(self):
paddle.enable_static()
x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
fluid.layers.concat([x_1, x_1], 0)
input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
out_1 = fluid.layers.concat(input=[x_2, x_3], axis=1)
out_2 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int32)
out_3 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int64)
exe = fluid.Executor(place=fluid.CPUPlace())
[res_1, res_2, res_3] = exe.run(fluid.default_main_program(),
feed={
"x_1": input_2,
"x_2": input_2,
"x_3": input_3
},
fetch_list=[out_1, out_2, out_3])
assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
def test_api(self):
paddle.enable_static()
x_1 = paddle.fluid.data(shape=[None, 1, 4, 5],
dtype='int32',
name='x_1')
paddle.concat([x_1, x_1], 0)
input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
out_1 = paddle.concat(x=[x_2, x_3], axis=1)
out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)
exe = paddle.static.Executor(place=paddle.CPUPlace())
[res_1, res_2, res_3,
res_4] = exe.run(paddle.static.default_main_program(),
feed={
"x_1": input_2,
"x_2": input_2,
"x_3": input_3
},
fetch_list=[out_1, out_2, out_3, out_4])
assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
import shutil
import hashlib
import json
import os
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
def override_config_recurs(config, config_extension):
try:
config_extension['name'] = config['name']+'_'+config_extension['name']
except KeyError:
pass
for key, value in config_extension.items():
if type(value) is dict:
config[key] = override_config_recurs(config[key], config_extension[key])
else:
assert key in config, "key {} is defined in the extension but not in the original config".format(key)
config[key] = value
return config
def compute_epsilon_schedule(config, n_epochs):
epsilon_schedule = config["epsilon_schedule"][0]
epsilon_init = config["epsilon_schedule"][1]
if epsilon_schedule == 'linear':
eps_range = np.linspace(epsilon_init, 0., n_epochs)
elif epsilon_schedule=='constant':
eps_range = [epsilon_init for _ in range(n_epochs)]
elif epsilon_schedule=='exp':
eps_decay = n_epochs / 4.
eps_range = [epsilon_init * np.exp(-1. * i / eps_decay) for i in range(n_epochs)]
else:
raise NotImplementedError("Wrong type of epsilon-greedy schedule")
return eps_range
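# Worked example (assumed config values): with
# config["epsilon_schedule"] == ["exp", 1.0] and n_epochs == 8, eps_decay is
# 2.0 and the schedule is [exp(-i / 2.0) for i in range(8)],
# i.e. roughly 1.0, 0.61, 0.37, ...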
def load_single_config(config_file):
with open(config_file, 'rb') as f_config:
config_str = f_config.read().decode('utf-8')
config = json.loads(config_str)
return config, config_str
def load_config_and_logger(env_config_file, model_config_file, exp_dir, seed,
args=None,
env_ext_file=None,
model_ext_file=None):
# To have a unique id, use raw str, concatenate them and hash it
config_str = ''
config_str_env_ext = ''
config_str_model_ext = ''
# Load env file and model
env_config, config_str_env = load_single_config(env_config_file)
model_config, config_str_model = load_single_config(model_config_file)
# Override env and model files if specified
if env_ext_file is not None:
env_ext_config, config_str_env_ext = load_single_config(env_ext_file)
env_config = override_config_recurs(env_config, env_ext_config)
if model_ext_file is not None:
model_ext_config, config_str_model_ext = load_single_config(model_ext_file)
model_config = override_config_recurs(model_config, model_ext_config)
# Merge env and model config into one dict
env_config['env_name'] = env_config['name']
env_config.update(model_config)
config = env_config
# set seed
set_seed(seed)
# Compute unique identifier based on those configs
config_str = config_str_env + config_str_env_ext + config_str_model + config_str_model_ext
exp_identifier = hashlib.md5(config_str.encode()).hexdigest()
# Save_path is the actual experiments path
# here, you save the experimental results, the rewards, the length etc ...
save_path = '{}/{{}}'.format(os.path.join(exp_dir, config['env_name'], exp_identifier, "seed"+str(seed)))
# General_save_path is the path of the model used (without the seed)
# This way, you store the general information such as the name, the config file etc ...
general_save_path = '{}/{{}}'.format(os.path.join(exp_dir, config['env_name'], exp_identifier))
if not os.path.isdir(save_path.format('')):
os.makedirs(save_path.format(''))
# Write which config files were used, in case the names in config are not set
with open(general_save_path.format("config_files.txt"), "w") as f:
f.write(env_config_file)
if env_ext_file:
f.write(" "+env_config_file+"\n")
else:
f.write("None")
f.write("\n")
f.write(model_config_file)
if model_ext_file:
f.write(" "+model_ext_file+"\n")
else:
f.write("None")
# Create empty training files
open(save_path.format('train_lengths'), 'w').close()
open(save_path.format('train_rewards'), 'w').close()
open(save_path.format('train.log'), 'w').close()
open(general_save_path.format('model_name'), 'w').write(config['name'])
# Create logger
logger = create_logger(save_path.format('train.log'))
logger.info("Config Hash {}".format(exp_identifier))
logger.info("Config name : {}".format(config["name"]))
logger.info(config)
if args is not None:
for key, val in vars(args).items():
logger.info("{} : {}".format(key, val))
# copy config file
with open(general_save_path.format('config.json'), 'w') as f:
json.dump(config, f, indent=4, separators=(',', ': '))
return config, exp_identifier, save_path
def create_logger(save_path):
logger = logging.getLogger()
# Debug = write everything
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler(save_path, 'a', 1000000, 1)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
return logger
def set_seed(seed):
import torch
import random
if seed > -1:
print('Using seed {}'.format(seed))
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
else:
raise NotImplementedError("Cannot set negative seed")
def write_seed_extensions(seed_range, out_name='../config/seed_extensions/'):
for seed in seed_range:
with open(out_name + str(seed), 'w+', encoding="utf8") as f_extension:
json.dump({"seed": seed}, f_extension)
def save_stats(save_path, reward_list, length_list):
reward_list = np.array(reward_list)
length_list = np.array(length_list)
# Copyright (c) [2021] [wlicsnju]
# [HKMF-T] is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
import logging
import numpy as np
class TagMean(object):
def __init__(self):
self._data = None
self._mask = None
self._tag = None
self._dim = 1
def put_and_reset(self, data, mask, tag):
if data.shape != tag.shape:
logging.error(f"'data'{data.shape} must have the same shape as 'tag'{tag.shape}.")
return
if len(mask.shape) != 1:
logging.error(f"'mask'{mask.shape} must be a 1-d array.")
return
if len(data.shape) == 1:
self._data = data[np.newaxis, :]
tag = tag[np.newaxis, :]
elif len(data.shape) == 2:
self._data = data
elif len(data.shape) > 2:
logging.error('Data with dim > 2 is not supported.')
return
self._dim = self._data.shape[0]
self._mask = mask
self._tag = tag
def train(self, *args):
logging.warning('TagMean has no trainable parameters; train() is a no-op.')
def get_result(self):
all_avg = np.zeros((self._dim, ))
tag_sum = {}
tag_count = {}
for i, m in enumerate(self._mask):
tag = self._tag[0, i]
if m > 0:
if tag not in tag_sum:
tag_sum[tag] = np.zeros((self._dim, ))
tag_count[tag] = 0
all_avg += self._data[:, i]
tag_sum[tag] += self._data[:, i]
tag_count[tag] += 1
all_avg = all_avg / float(np.sum(self._mask != 0))
blackout_l = np.sum(self._mask == 0)
import re
from typing import Sequence, Any, Union, Dict, Optional, Callable
from re import split
from pathlib import Path
import numpy as np
from sigpipes.auxtools import type_info, smart_tostring, TimeUnit
import h5py
import csv
class DPath:
def __init__(self, root, dir, stem, suffix):
self.root = root
self.dir = dir
self.stem = stem
self.suffix = suffix
@staticmethod
def from_path(path, dir=False):
p = Path(path)
parts = Path(path).parts
if p.is_absolute():
root = parts[0]
sind = 1
else:
root = ""
sind = 0
if dir:
if sind < len(parts):
dir = str(Path(parts[sind]).joinpath(*parts[sind+1:]))
else:
dir = ""
stem = ""
suffix = ""
else:
if sind < len(parts)-1:
dir = str(Path(Path(parts[sind]).joinpath(*parts[sind+1:-1])))
else:
dir = ""
if parts:
suffix = "".join(Path(parts[-1]).suffix)
stem = str(Path(parts[-1]))[:-len(suffix)] if suffix else str(Path(parts[-1]))
else:
suffix = ""
stem = ""
return DPath(root, dir, stem, suffix)
def extend_stem(self, extension, *, sep="_"):
if extension == "":
return self
assert self.stem != ""
return DPath(self.root, self.dir, self.stem + sep + extension, self.suffix)
def resuffix(self, newsuffix):
assert self.stem != ""
return DPath(self.root, self.dir, self.stem, newsuffix)
def restem(self, newstem):
return DPath(self.root, self.dir, newstem, self.suffix)
def __repr__(self):
return f"root: {self.root}, dir: {self.dir}, stem: {self.stem}, suffix: {self.suffix}"
def __str__(self):
return str(Path(self.root).joinpath(self.dir, self.stem+self.suffix))
@property
def empty(self):
return self.root == "" and self.dir == "" and self.stem == "" and self.suffix == ""
def prepend_path(self, prep):
if prep.empty:
return self
if self.root:
raise ValueError(f"Absolute path is not prependable {repr(prep)} < {self}")
assert prep.stem == "" and prep.suffix == ""
return DPath(prep.root, str(Path(prep.dir)/Path(self.dir)), self.stem, self.suffix)
def base_path(self, base):
if self.dir == "" and self.root == "":
root = base.root
dir = base.dir
else:
root = self.root
dir = self.dir
if self.stem == "":
stem = base.stem
suffix = base.suffix
else:
stem = self.stem
suffix = self.suffix
return DPath(root, dir, stem, suffix)
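# Usage sketch (paths assumed for illustration):
#     p = DPath.from_path("/data/run1/signal.csv")
#     # p.root == "/", p.dir == "data/run1", p.stem == "signal", p.suffix == ".csv"
#     str(p.extend_stem("filtered"))  # "/data/run1/signal_filtered.csv"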
def folder_copy(newdict: Dict[str, Any], olddict: Dict[str, Any],
segpath: str, shared_folders: Sequence[str],
empty_folders: Sequence[str]) -> None:
for key, value in olddict.items():
itempath = f"{segpath}/{key}" if segpath != "" else key
if isinstance(value, dict):
if itempath in empty_folders:
newdict[key] = {}
elif itempath in shared_folders:
newdict[key] = olddict[key]
else:
newdict[key] = {}
folder_copy(newdict[key], olddict[key], itempath, shared_folders, empty_folders)
else:
newdict[key] = value
def hdict_map(d: Dict[str, Any], function):
for key, value in d.items():
if isinstance(value, dict):
hdict_map(value, function)
else:
d[key] = function(value)
class HierarchicalDict:
"""
Dictionary with hierarchical keys implemented as a structure of nested (standard) dictionaries.
Individual levels in hierarchical keys are separated by slashes (initial or final slashes
are optional).
"""
def __init__(self, root: Dict[str, Any] = None):
self.root = {} if root is None else root
def _create_path(self, key: str):
path = split(r"/+", key.strip("/"))
adict = self.root
for folder in path[:-1]:
if folder not in adict:
adict[folder] = {}
if not isinstance(adict[folder], dict):
raise KeyError(f"{folder} in {key} is leaf not folder")
adict = adict[folder]
return path, adict
def __setitem__(self, key: str, value: Any) -> None:
"""
Set value with given hierarchical key (path). It creates all levels in the
path. The value can have any type except "dict" (empty folders can be created
by `make_folder` method)
"""
assert not isinstance(value, dict), "dict is invalid leaf value"
path, adict = self._create_path(key)
leaf = path[-1]
if isinstance(adict.get(leaf, None), dict):
raise KeyError(f"leaf name {leaf} in {key} is folder")
adict[leaf] = value
def __getitem__(self, key: str) -> Any:
"""
Get value with given hierarchical key (path). Empty path is invalid.
"""
path = split(r"/+", key.strip("/"))
adict = self.root
for folder in path[:-1]:
if folder not in adict:
raise KeyError(f"folder {folder} in key {key} does not exist")
adict = adict[folder]
leaf = path[-1]
if leaf not in adict:
raise KeyError(f"leaf {leaf} in key {key} does not exist")
return adict[leaf]
def __contains__(self, key: str) -> bool:
"""
Testing existence of given hierarchical key (in the form of leaf value or folder).
"""
path = split(r"/+", key.strip("/"))
adict = self.root
for folder in path:
if folder not in adict:
return False
adict = adict[folder]
return True
def deepcopy(self, shared_folders: Sequence[str]=[],
empty_folders: Sequence[str] = [],
root: Optional[str] = None) -> "HierarchicalDict":
"""
        Deep copy of folders of the hierarchical tree or a subtree (leaf values are not
        duplicated).
        Args:
            shared_folders: folders which are not duplicated but shared
            empty_folders: folders whose contents are not copied
                (i.e. these folders are empty in the duplicate)
            root: path to the copied subtree (if None, the whole tree is copied)
Returns:
duplicate or partial duplicate
"""
new_hdict = {}
folder_copy(new_hdict, self.root if root is None else self[root], "",
shared_folders, empty_folders)
return HierarchicalDict(new_hdict)
def make_folder(self, key: str) -> None:
"""
Creation of empty folder with given path (all levels of path are
created)
Args:
key: path to new folder
"""
path, adict = self._create_path(key)
folder_name = path[-1]
if folder_name in adict:
raise KeyError(f"key {folder_name} exists")
adict[folder_name] = {}
def __str__(self):
return HierarchicalDict.rec_print(self.root, 0)
def __repr__(self):
return HierarchicalDict.rec_print(self.root, 0)
def map(self, function: Callable[[Any], Any], root: Optional[str] = None):
"""
Map (unary) function on all values of given subtree. Map changes original subtree
(no duplication is performed).
Args:
function: mapping function (it must be defined for all values in subtree)
            root: path to subtree (or None if the whole hierarchy is modified)
"""
hdict_map(self.root if root is None else self[root], function)
@staticmethod
def rec_print(d, level: int):
return "\n".join(
f"{' ' * level}{key}: {type_info(value)} {smart_tostring(value, level + len(key))}"
if not isinstance(value, dict)
else f"{' ' * level}{key}/\n{HierarchicalDict.rec_print(value, level + 1)}"
for key, value in d.items())
def __iter__(self):
yield from self.rec_iterator(self.root, "")
def rec_iterator(self, d, prefix):
for key, value in d.items():
path = prefix + "/" + key
if isinstance(value, dict):
yield from self.rec_iterator(d[key], path)
else:
yield path, value
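# Illustrative usage sketch (not part of the original module): the key paths
# below are hypothetical.
def _hierarchical_dict_example():
    hd = HierarchicalDict()
    hd["signals/fs"] = 250.0                     # intermediate folders are created on demand
    hd.make_folder("meta")                       # explicit empty folder
    assert "signals" in hd and "signals/fs" in hd
    copy = hd.deepcopy(shared_folders=["meta"])  # "meta" is shared, not duplicated
    return hd, copy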
class SigContainer:
"""
Hierarchical container for physiological signal data (including annotations and
auxiliary (meta)data).
"""
def __init__(self, data: HierarchicalDict) -> None:
"""
        Private constructor. Use factory methods: `from_signal_array` or `from_hdf5`.
"""
self.d = data
@staticmethod
def from_signal_array(signals: np.ndarray, channels: Sequence[str],
units: Sequence[str], fs: float = 1.0, basepath: str = "") -> "SigContainer":
"""
Creation of container from signal data (in numpy array) and basic metadata.
        Args:
            signals: signals as 2D array, channels in rows
            channels: identifiers of channels
            units: units of channels data
            fs: (common) sampling frequency
            basepath: base path for file resources
"""
d = HierarchicalDict()
d["signals/data"] = signals
d["signals/channels"] = channels
d["signals/units"] = units
d["signals/fs"] = fs
d["log"] = []
d["basepath"] = str(DPath.from_path(basepath))
d.make_folder("meta")
return SigContainer(d)
def add_annotation(self, annotator: str,
samples: Sequence[int], types: Sequence[str],
notes: Sequence[str]) -> "SigContainer":
"""
Add the set of annotations to container.
See https://physionet.org/physiobank/annotations.shtml for detailed description.
Args:
            annotator: short identifier of the source of these annotations
samples: list of annotated samples in signal
types: list of short (one character) identification of individual annotations
(one per list of samples)
notes: longer description of individual annotations
(one per record in list of samples)
"""
self.d[f"annotations/{annotator}/samples"] = samples
self.d[f"annotations/{annotator}/types"] = types
self.d[f"annotations/{annotator}/notes"] = notes
return self
def __getitem__(self, key: str) -> Any:
"""
        Getter for container data (signals, annotations, metadata, etc.)
"""
return self.d[key]
@property
def signals(self):
"""
Signal data (2D numpy array)
"""
return self.d["signals/data"]
def feature(self, name: str):
return self.d[f"/meta/features/{name}"]
def __str__(self):
return str(self.d)
def get_channel_triple(self, i):
"""
Auxiliary getter for signal of i-th channel with metadata.
Returns:
triple (data of channel, channel id, channel unit)
"""
return (self.d["signals/data"][i, :], self.d["signals/channels"][i],
self.d["signals/units"][i])
@property
def sample_count(self):
"""
Number of samples in signal.
"""
return self.d["signals/data"].shape[1]
@property
def channel_count(self):
"""
        Number of channels.
"""
return self.d["signals/data"].shape[0]
@property
def basepath(self):
return DPath.from_path(self.d["basepath"])
@property
def id(self):
"""
Unique identifier of container state (sequence of modifications performed
on container). This identifier can be used as unique filename for outputs.
"""
return "~".join(op for op in self.d["log"]
if not op.startswith("#")).replace(".", ",").replace(" ", "")
@property
def lag(self):
return self.d["/signals/lag"] if "/signals/lag" in self.d else 0
def x_index(self, index_type: TimeUnit, fs: Union[int, float]):
"""
Returns:
X-axis array for signal data in given time units (time representation).
"""
if index_type == TimeUnit.SAMPLE:
index = np.arange(0, self.sample_count) - self.lag
else:
interval = 1.0 / fs
index = np.linspace(0.0, self.sample_count * interval, self.sample_count,
endpoint=False) - self.lag * interval
if index_type == TimeUnit.TIME_DELTA:
index = np.fromiter((np.timedelta64(int(t * 1_000_000_000), "ns") for t in index),
dtype="timedelta64[ns]")
return index
def get_annotation_positions(self, specifier: str, index_type: TimeUnit,
fs: Union[int, float]):
"""
Positions of annotations (markers) in given time units (time representation).
Args:
specifier: identification of annotation source (annotator)
index_type: time line representation
fs: sample frequency
"""
annotator, achar, astring = SigContainer.annotation_parser(specifier)
if annotator not in self.d["annotations"]:
raise KeyError(f"""Invalid annotator {annotator}
(only {','.join(self.d['annotations'].keys())} are included)""")
samples = np.array(self.d[f"annotations/{annotator}/samples"])
types = np.array(self.d[f"annotations/{annotator}/types"])
notes = np.array(self.d[f"annotations/{annotator}/notes"])
if achar is not None:
samples = samples[types == achar]
notes = notes[types == achar]
if astring is not None:
r = re.compile(astring)
vmatch = np.vectorize(lambda text: bool(r.fullmatch(text)))
samples = samples[vmatch(notes)]
return self.x_index(index_type, fs)[samples]
@staticmethod
def annotation_parser(aname: str) -> Sequence[str]:
match = re.fullmatch(r"([.\w]+)(?:/(.))?(?:=(.+))?", aname)
if match:
return match.groups()
else:
raise KeyError(f"Invalid annotation specification `{aname}`")
def get_fft_tuple(self, i: int, source: str):
return self.d[f"{source}/data"][i, :], self.d[f"{source}/channels"][i]
@staticmethod
def from_hdf5(filename: str, *, path: str = None, use_saved_path: bool = False) -> "SigContainer":
data = HierarchicalDict()
with h5py.File(filename, "r") as f:
f.visititems(lambda name, item : SigContainer._visitor(data, name, item))
if not use_saved_path:
data["basepath"] = str(DPath.from_path(filename).prepend_path(DPath.from_path(path, dir=True)))
return SigContainer(data)
@staticmethod
def hdf5_cache(source, operator: "SigOperator", path: str = "") -> "SigContainer":
path = DPath.from_path(path).base_path(source._filepath.extend_stem("_cache").resuffix(".hdf5"))
if Path(str(path)).exists():
return SigContainer.from_hdf5(str(path), use_saved_path=True)
else:
from sigpipes.sigoperator import Hdf5
            return source.sigcontainer() | operator | Hdf5(str(path))
@staticmethod
def _visitor(data: HierarchicalDict, name: str, item: Union[h5py.Group, h5py.Dataset]) -> None:
if isinstance(item, h5py.Group):
data.make_folder(name)
elif isinstance(item, h5py.Dataset):
type = item.attrs["type"]
if type == "int":
data[name] = int(item[0])
elif type == "float":
data[name] = float(item[0])
elif type == "str":
data[name] = item[0].decode(encoding="utf-8")
elif type == "list":
data[name] = list(item[()])
elif type == "str_list":
data[name] = list(s.decode(encoding='UTF-8') for s in item[()])
elif type in ["ndarray", "str_ndarray"]:
data[name] = item[()]
@staticmethod
def from_csv(filename: str, * , dir: str = None, dialect: str = "excel", header: bool = True,
default_unit: str = "unit", fs=None, transpose: bool = False,
annotation: Union[str,Sequence[str]] = None) -> "SigContainer":
"""
Args:
filename: absolute or relative file name
dir: base directory of relative file name (optional)
dialect: dialect of CSV (see CSV module)
header: first line is header with channels names and units (in parenthesis)
default_unit: default unit (if is not defined by header)
            fs: sampling frequency; if the sampling frequency is not provided, it is
                derived from the first column, which must contain timestamps in seconds
annotation: filename of annotation file
        Returns:
            SigContainer with the loaded signal data
"""
if dir is not None:
filepath = Path(dir) / Path(filename)
else:
filepath = Path(filename)
signals = []
times = []
units = None
headers = None
with open(filepath, "rt", newline='') as csvfile:
reader = csv.reader(csvfile, dialect=dialect)
if header:
row = next(reader)
headers = row[1:] if fs is None else row
for row in reader:
if fs is None:
times.append(float(row[0]))
signals.append([float(x) for x in row[1:]])
else:
signals.append([float(x) for x in row])
if not transpose:
data = np.array(signals).transpose()
else:
            data = np.array(signals)
import os
import numpy as np
from mapgwm.headobs import preprocess_headobs, get_data
def test_preprocess_headobs(test_output_folder, test_data_path):
# input files
#data_file = os.path.join(test_data_path, 'headobs', 'GW_monthly_stats_test.txt')
data_file = test_data_path / 'headobs/GW_monthly_stats1990-01-01_2019-12-31.txt'
#metadata_file = os.path.join(test_data_path, 'headobs', 'GW_monthly_meta_test.txt')
metadata_file = test_data_path / 'headobs/GW_monthly_meta1990-01-01_2019-12-31.txt'
# output
outputfile = os.path.join(test_output_folder, 'preprocessed_monthly_output.csv')
start_date = '1998-04-01'
# areas of interest within model to break out as separate observation groups
geographic_groups = [test_data_path / 'extents/CompositeHydrographArea.shp',
test_data_path / 'extents/MAP_generalized_regions.shp'
]
# read the data
data_orig, metadata_orig = get_data(data_file, metadata_file)
data, metadata = preprocess_headobs(data_orig, metadata_orig,
head_data_columns=['head', 'last_head'],
data_length_units='feet',
active_area=os.path.join(test_data_path, 'extents/ms_delta.shp'),
source_crs=4269, dest_crs=5070,
start_date=start_date,
geographic_groups=geographic_groups,
geographic_groups_col='obsgroup',
outfile=outputfile)
assert os.path.exists(os.path.join(test_output_folder, 'preprocessed_monthly_output_info.shp'))
assert os.path.exists(os.path.join(test_output_folder, 'preprocessed_monthly_output_info.csv'))
assert np.all(data.columns ==
['site_no', 'datetime', 'head', 'last_head', 'head_std', 'n', 'obsprefix'])
assert not any(set(data.obsprefix).difference(metadata.obsprefix))
assert not any({'site_no', 'x', 'y', 'screen_botm', 'screen_top',
'category', 'group'}.difference(metadata.columns))
assert metadata['n'].dtype == np.integer
# unit conversion was applied evenly
assert np.allclose(data['head'].values, data.last_head.values, rtol=0.1)
assert np.allclose(metadata['head'].values, metadata.last_head.values, rtol=0.1)
if data_orig.head_std.any() and data.head_std.any():
        # the original line is truncated after np.nanmean(data_orig.head_std); the
        # comparison target below is an assumed reconstruction
        assert np.allclose(np.nanmean(data_orig.head_std), np.nanmean(data.head_std), rtol=0.1)
import numpy as np
from LevyVector import levy
import AdaptiveStrategy as Adaption
def evolve(problem, n_eval=300000, n_pop=100,
diff_mode="de/curr-to-pbest/1",
F=0.5, CR=0.9, p=0.05,
adapt_params=[], adapt_strategy="none", params_of_adapt_strategy={},
indicator="none", n_step=1,
epsilon=1e-14, seed=1000, is_print=True, file="none"):
'''
Differential Evolution (DE) with Self-adaptive strategy
Problem Parameters
----------
problem: object
- optimization problem to be solved
Running Parameters
----------
n_eval: int
- maximum number of evaluations
n_pop: int
- population size
epsilon: float
- tolerance of algorithm running
seed: int
- seed of random number
is_print: bool
- whether print results on screen or not
file: string
- file to record history
- valid input:
+ "none": do not record
+ otherwise, write to the file name
Algorithm Parameters
----------
diff_mode: string
- mode of differential mutation
- valid input: { "de", "de/curr-to-pbest", "de/curr-to-pbest/1" }
Variation Parameters
----------
F, CR, p: float OR tuple
- scaling factor, crossover rate, elite rate
- valid input:
+ for strategy type 1, input (lower, upper)
+ for strategy type 2, input (lower, mean, upper)
+ for non-adaptive, input fixed float
* change { adapt_params, adapt_strategy } correspondingly
Strategy Parameters
----------
adapt_params: list of string
- parameters which are set to be adaptive
- valid input: { ["F"], ["CR"], ["F, CR"] }
* change { F, CR, adapt_strategy } correspondingly
adapt_strategy: string
- parameter variation method in adaptive strategy
- valid input: { "none", "random", "best_levy", "ga", "cauchy", "jade" }
* change { F, CR, adapt_params } correspondingly
indicator: string
- indicator used to assess quality of parameters
automatically set to "none" if adapt_strategy == "none" or "random"
- valid input: { "dfii/dfi" } !TODO - increase more indicators
n_step: int
- frequency to update parameters
        e.g. n_step=20 means to update parameters every 20 generations
Return
----------
X: 2D-Array
- decision variables of individuals (final evaluation)
F: 1D-Array
- fitness variables of individuals (final evaluation)
c_eval: int
- number of evaluations
'''
if type(seed) == int: np.random.seed(seed) # Set seed for random number generator
evaluate_fitness = problem.f # Get problem information
xl, xu = problem.boundaries
n_var = problem.n_var
#####################
# PARAMETER SETTING #
#####################
strategy_type1 = ["best_levy", "ga", "random"] # Strategy with random parameter initialization
strategy_type2 = ["cauchy"] # Strategy with cauchy inintialization
strategy_type3 = ["jade"] # Strategy with initialization in JADE
if adapt_strategy == "none":
Fs = np.ones(n_pop) * F
CRs = np.ones(n_pop) * CR
if adapt_strategy in strategy_type1: # Initialize parameters with uniform distribution
if "F" in adapt_params:
Fs = np.random.uniform(F[0], F[1], n_pop)
if type(CR) == float: CRs = np.ones(n_pop) * CR
if "CR" in adapt_params:
if type(F) == float: Fs = np.ones(n_pop) * F
CRs = np.random.uniform(CR[0], CR[1], n_pop)
if adapt_strategy in strategy_type2: # Initialize parameters with Cauchy distribution
if "F" in adapt_params:
F_mu = F[1]
Fs = np.random.standard_cauchy(n_pop) * 0.1 + F_mu
Fs = np.clip(Fs, F[0], F[2])
if type(CR) == float: CRs = np.ones(n_pop) * CR
if "CR" in adapt_params:
CR_mu = CR[1]
if type(F) == float: Fs = np.ones(n_pop) * F
CRs = np.random.standard_cauchy(n_pop) * 0.1 + CR_mu
CRs = np.clip(CRs, CR[0], CR[2])
if adapt_strategy in strategy_type3: # Initialize F with Cauchy distribution, CR with Gaussian distribution
if "F" in adapt_params:
F_mu = F[1]
Fs = np.random.standard_cauchy(n_pop) * 0.1 + F_mu
Fs = np.clip(Fs, F[0], F[2])
if type(CR) == float: CRs = np.ones(n_pop) * CR
if "CR" in adapt_params:
CR_mu = CR[1]
if type(F) == float: Fs = np.ones(n_pop) * F
CRs = np.random.normal(CR_mu, 0.1, n_pop)
CRs = np.clip(CRs, CR[0], CR[2])
#################
# START PROGRAM #
#################
X = np.random.uniform(xl, xu, (n_pop, n_var)) # Initialize population
Y = np.array([evaluate_fitness(x) for x in X]) # Evaluate fitness
c_eval, c_gen = n_pop, 0
if adapt_strategy in ["none", "random"]:
indicator = "none"
else:
I = np.zeros(n_pop)
#############
# MAIN LOOP #
#############
while True: # Enter generation loop
c_gen += 1
for i in np.random.permutation(n_pop): # Traverse population
xi, yi = X[i,:], Y[i] # Get current individual
Fi, CRi = Fs[i], CRs[i] # Get current parameters
maskbit = np.random.choice([0, 1], n_var, p=[1 - CRi, CRi]) # Get maskbit for crossover
if diff_mode == "de": # Reproduce by DE
xr1 = X[np.random.choice(n_pop),:]
xr2 = X[np.random.choice(n_pop),:]
xi_ = xi + Fi * (xr1 - xr2) * maskbit
if diff_mode == "de/curr-to-pbest": # Reproduce by DE/current to pbest
pbest = sorted(np.arange(n_pop), key=lambda k: Y[k])[:int(p * n_pop)]
xpbest = X[np.random.choice(pbest),:]
xi_ = xi + Fi * (xpbest - xi) * maskbit
if diff_mode == "de/curr-to-pbest/1": # Reproduce by DE/current to pbest/1
pbest = sorted(np.arange(n_pop), key=lambda k: Y[k])[:int(p * n_pop)]
xpbest = X[np.random.choice(pbest),:]
xr1 = X[np.random.choice(n_pop),:]
xr2 = X[np.random.choice(n_pop),:]
xi_ = xi + Fi * (xpbest - xi + xr1 - xr2) * maskbit
xi_ = np.clip(xi_, xl, xu) # Repair to satisfy constraint
yi_ = evaluate_fitness(xi_) # Evaluate offspring
c_eval += 1
if yi_ < yi: # Select better individual
X[i], Y[i] = xi_, yi_
if c_eval >= n_eval or min(Y) - problem.optimalF <= epsilon:# Termination criteria
if is_print == True:
print("{}, {:.2e}, {:.1f}, {:.1f}". # Print results on screen
format(c_eval, min(Y), np.mean(Fs), np.mean(CRs)))
if file != "none": # Write results to file
history = open(file, "a")
history.write(f"{c_eval},{min(Y)},{np.mean(Fs)},{np.mean(CRs)}\n")
history.close()
return X, Y, c_eval
if indicator == "dfii/fi": # Cumulate indicator
I[i] += max([yi - yi_, 0]) / (yi - min(Y) + 1e-14)
if indicator == "dfii":
I[i] += max([yi - yi_, 0])
if is_print == True:
print("{}, {:.2e}, {:.1f}, {:.1f}". # Print results on screen
format(c_eval, min(Y), np.mean(Fs), np.mean(CRs)))
if file != "none": # Write results to file
history = open(file, "a")
history.write(f"{c_eval},{min(Y)},{np.mean(Fs)},{np.mean(CRs)}\n")
history.close()
############
# ADAPTION #
############
if c_gen % n_step == 0:
if adapt_strategy == "none": # No adaption
Fs, CRs = Fs, CRs
if adapt_strategy == "random": # Random adaption
Fs, CRs = Fs, CRs
if "F" in adapt_params:
Fs = Adaption.rand_adapt(Fs, F[0], F[1])
if "CR" in adapt_params:
CRs = Adaption.rand_adapt(CRs, CR[0], CR[1])
"""
!TODO - design a better variation for best_levy
"""
if adapt_strategy == "best_levy": # Best levy adaption
Fs, CRs = Fs, CRs
if "CR" in adapt_params:
CRs = Adaption.best_levy_adapt(CRs, I[:], CR[0], CR[1], params_of_adapt_strategy)
if "F" in adapt_params:
Fs = Adaption.best_levy_adapt(Fs, I[:], F[0], F[1], params_of_adapt_strategy)
if adapt_strategy == "ga" and | np.sum(I) | numpy.sum |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import time
import json
import numpy as np
import numba as nb
from enum import Enum
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Core.snapshot_pf_data import compile_snapshot_circuit
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis, make_worst_contingency_transfer_limits
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Simulations.results_table import ResultsTable
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import DriverTemplate
########################################################################################################################
# Optimal Power flow classes
########################################################################################################################
class AvailableTransferMode(Enum):
Generation = 0
InstalledPower = 1
Load = 2
GenerationAndLoad = 3
@nb.njit()
def compute_alpha(ptdf, P0, Pinstalled, idx1, idx2, bus_types, dT=1.0, mode=0):
"""
Compute all lines' ATC
:param ptdf: Power transfer distribution factors (n-branch, n-bus)
    :param P0: all bus injections [p.u.]
    :param Pinstalled: all bus installed power [p.u.]
:param idx1: bus indices of the sending region
:param idx2: bus indices of the receiving region
:param bus_types: Array of bus types {1: pq, 2: pv, 3: slack}
:param dT: Exchange amount
:param mode: Type of power shift
0: shift generation based on the current generated power
1: shift generation based on the installed power
2: shift load
3 (or else): shift using generation and load
:return: Exchange sensitivity vector for all the lines
"""
nbr = ptdf.shape[0]
nbus = ptdf.shape[1]
# declare the bus injections increment due to the transference
    dP = np.zeros(nbus)
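# Sketch of the documented mode-0 shift (generation-proportional), for
# illustration only; the body of compute_alpha is truncated above in this
# source, so this is not the original implementation.
def _generation_shift_sketch(P0, idx1, idx2, dT=1.0):
    dP = np.zeros(len(P0))
    dP[idx1] = dT * P0[idx1] / np.sum(P0[idx1])    # sending region generates dT more
    dP[idx2] = -dT * P0[idx2] / np.sum(P0[idx2])   # receiving region generates dT less
    return dP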
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for performing retrieval."""
import abc
from concurrent import futures
import time
from absl import logging
from language.realm import featurization
from language.realm import parallel
from language.realm import profile
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
class Retriever(abc.ABC):
"""Retrieves documents for a query."""
@abc.abstractmethod
def retrieve(self, query_batch):
"""Retrieves candidates for a batch of queries.
Args:
query_batch (list[Query]): a list of queries.
Returns:
a batch of lists, where each list is a list of Documents for the
corresponding query.
"""
raise NotImplementedError()
class DummyRetriever(Retriever):
"""Dummy retriever for testing."""
def __init__(self, num_neighbors):
self._num_neighbors = num_neighbors
self.total_candidates = 13353718
self.embed_dim = 128
with tf.device('/CPU:0'):
self._doc_embeds = tf.zeros((self.total_candidates, self.embed_dim))
def retrieve(self, query_batch):
# [batch_size, embed_dim]
query_embeds = tf.zeros((len(query_batch), self.embed_dim))
with tf.device('/CPU:0'):
# [batch_size, total_candidates]
cand_scores = tf.matmul(query_embeds, self._doc_embeds, transpose_b=True)
_, top_ids_batch = tf.math.top_k(cand_scores, k=self._num_neighbors)
title_ids = np.zeros(10, dtype=np.int32)
body_ids = np.zeros(280, dtype=np.int32)
retrievals_batch = []
for top_ids in top_ids_batch:
retrievals = [
featurization.Document(0, title_ids, body_ids) for i in top_ids
]
retrievals_batch.append(retrievals)
return retrievals_batch
class BruteForceRetriever(Retriever):
"""Retrieves documents using brute force matrix multiplication."""
def __init__(self, query_embedder, documents, doc_embeds_or_path,
num_neighbors):
"""Constructs BruteForceRetriever.
Args:
query_embedder: an instance of QueryEmbedder.
documents: a list of Document objects.
doc_embeds_or_path: either a [num_docs, embed_dim] TF Tensor, or a path to
load it.
num_neighbors: number of neighbors to retrieve.
"""
total_candidates = len(documents)
self._query_embedder = query_embedder
self._num_neighbors = num_neighbors
self._documents = documents
# Load embeddings.
if isinstance(doc_embeds_or_path, str):
with tf.device('/CPU:0'):
ckpt_reader = tf.train.load_checkpoint(doc_embeds_or_path)
self._doc_embeds = ckpt_reader.get_tensor('block_emb')
else:
self._doc_embeds = doc_embeds_or_path
logging.info('Loaded document embeddings.')
# Check shapes.
if self._doc_embeds.shape[0] != total_candidates:
raise ValueError('Did not load the right number of embeddings.')
@profile.profiled_function
def retrieve(self, query_batch):
# [batch_size, embed_dim]
query_embeds = self._query_embedder.embed(query_batch)
with tf.device('/CPU:0'):
# [batch_size, total_candidates]
cand_scores = tf.matmul(query_embeds, self._doc_embeds, transpose_b=True)
_, top_ids_batch = tf.math.top_k(cand_scores, k=self._num_neighbors)
retrievals_batch = []
for top_ids in top_ids_batch:
retrievals = [self._documents[i] for i in top_ids]
retrievals_batch.append(retrievals)
return retrievals_batch
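# Illustrative usage sketch (not part of the original module). Assumes a
# query_embedder exposing .embed and a list of featurization.Document objects;
# the zero embeddings below stand in for real document embeddings.
def _brute_force_retriever_example(query_embedder, documents, queries):
  doc_embeds = tf.zeros((len(documents), 128))
  retriever = BruteForceRetriever(query_embedder, documents, doc_embeds,
                                  num_neighbors=5)
  return retriever.retrieve(queries)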
def count_tf_records(file_path):
"""Counts the number of records in a GZIP'd TFRecord file."""
gzip_option = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.GZIP)
count = 0
for _ in tf.python_io.tf_record_iterator(file_path, gzip_option):
count += 1
return count
def count_tf_records_parallel_helper(args):
"""Just a helper function for count_tf_records_parallel."""
file_idx, file_path = args
return (file_idx, count_tf_records(file_path))
def count_tf_records_parallel(file_paths, num_processes=None):
"""Counts number of records in TFRecord files in parallel.
Args:
file_paths: a list of paths, where each path points to a GZIP-ed TFRecord
file.
num_processes: number of Python processes to use in parallel. If None, will
use all available CPUs.
Returns:
shard_sizes: a list of ints.
"""
num_files = len(file_paths)
with parallel.Executor(
create_worker=lambda: count_tf_records_parallel_helper,
queue_size=num_files,
num_workers=num_processes) as executor:
for file_idx, file_path in enumerate(file_paths):
executor.submit((file_idx, file_path))
counts = [None] * num_files
results = executor.results(max_to_yield=num_files)
for i, (file_idx, count) in enumerate(results):
counts[file_idx] = count
logging.info('Counted %d / %d files.', i + 1, num_files)
return counts
def load_documents(path):
"""Loads Documents from a GZIP-ed TFRecords file into a Python list."""
gzip_option = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.GZIP)
def get_bytes_feature(ex, name):
return list(ex.features.feature[name].bytes_list.value)
def get_ints_feature(ex, name):
# 32-bit Numpy arrays are more memory-efficient than Python lists.
    return np.array(ex.features.feature[name].int64_list.value, dtype=np.int32)
import os.path
import numpy as np
import math
from collections import namedtuple
from typing import Dict, Any, Tuple, List, Optional
from models.adaptive_model import AdaptiveModel
from models.standard_model import StandardModel
from dataset.dataset import Dataset, DataSeries
from utils.file_utils import save_by_file_suffix, read_by_file_suffix
from utils.sequence_model_utils import SequenceModelType
from utils.constants import OUTPUT, LOGITS, SEQ_LENGTH, SKIP_GATES, PHASE_GATES, STOP_OUTPUT_NAME
LOG_FILE_FMT = 'model-{0}-{1}-{2}.jsonl.gz'
ModelResults = namedtuple('ModelResults', ['predictions', 'labels', 'stop_probs', 'accuracy'])
BATCH_SIZE = 64
def clip(x: int, bounds: Tuple[int, int]) -> int:
if x > bounds[1]:
return bounds[1]
elif x < bounds[0]:
return bounds[0]
return x
def save_test_log(accuracy: float, power: float, valid_accuracy: Optional[float], budget: float, system_name: str, key: str, output_file: str):
test_log: Dict[str, Dict[str, Any]] = dict()
if os.path.exists(output_file):
test_log = list(read_by_file_suffix(output_file))[0]
if key not in test_log:
test_log[key] = dict()
log_value = {
'ACCURACY': accuracy,
'AVG_POWER': power,
'VALID_ACCURACY': valid_accuracy,
'BUDGET': budget,
'SYSTEM_NAME': system_name
}
budget_str = '{0:.4f}'.format(budget)
test_log[key][budget_str] = log_value
save_by_file_suffix([test_log], output_file)
def get_budget_index(budget: float, valid_accuracy: np.ndarray, max_time: int, power_estimates: np.ndarray, allow_violations: bool) -> int:
"""
Selects the single model level which should yield the best overall accuracy. This decision
is based on the validation accuracy for each level.
Args:
budget: The current avg power budget
valid_accuracy: A [L] array containing the validation accuracy for each model level
max_time: The number of timesteps
power_estimates: A [L] array of power estimates for each level
        allow_violations: If True, the index is selected in a manner that allows budget
            violations when such violations lead to better end-to-end accuracy.
Returns:
The "optimal" model level.
"""
best_index = 0
best_acc = 0.0
if allow_violations:
num_levels = valid_accuracy.shape[0]
energy_budget = budget * max_time
for level_idx in range(num_levels):
# Estimate the number of timesteps on which we can perform inference with this level
avg_power = power_estimates[level_idx]
projected_timesteps = min(energy_budget / avg_power, max_time)
projected_correct = valid_accuracy[level_idx] * projected_timesteps
estimated_accuracy = projected_correct / max_time
if estimated_accuracy > best_acc:
best_acc = estimated_accuracy
best_index = level_idx
else:
budget_comparison = power_estimates <= budget
if np.any(budget_comparison):
budget_mask = budget_comparison.astype(float)
masked_accuracy = valid_accuracy * budget_mask
best_index = np.argmax(masked_accuracy)
else:
best_index = np.argmin(power_estimates)
return best_index
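# Illustrative sketch with hypothetical numbers: with allow_violations=False,
# level 1 wins because it has the best validation accuracy among levels whose
# power estimate fits the budget of 1.2.
def _get_budget_index_example():
    valid_accuracy = np.array([0.70, 0.80, 0.85])
    power_estimates = np.array([0.5, 1.0, 2.0])
    return get_budget_index(budget=1.2, valid_accuracy=valid_accuracy, max_time=100,
                            power_estimates=power_estimates, allow_violations=False)  # -> 1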
def concat_model_results(model_results: List[ModelResults]) -> ModelResults:
"""
Stacks each field of the given results into a single array. This is useful
for Skip RNN and Phased RNN systems in which each output is a separate model.
"""
predictions = np.concatenate([r.predictions for r in model_results], axis=1) # [N, L]
labels = model_results[0].labels # [N, 1]
stop_probs = [r.stop_probs for r in model_results]
accuracy = [r.accuracy for r in model_results]
return ModelResults(predictions=predictions,
labels=labels,
stop_probs=stop_probs,
accuracy=accuracy)
def execute_adaptive_model(model: AdaptiveModel, dataset: Dataset, series: DataSeries) -> ModelResults:
"""
Executes the neural network on the given data series. We do this in a separate step
to avoid recomputing for multiple budgets. Executing the neural network is relatively expensive.
Args:
model: The adaptive model used to perform inference
dataset: The dataset to perform inference on
series: The data series to extract. This is usually the TEST set.
Returns:
A model result tuple containing the inference results.
"""
level_predictions: List[np.ndarray] = []
stop_probs: List[np.ndarray] = []
labels: List[np.ndarray] = []
num_outputs = model.num_outputs
# Operations to execute
ops = [LOGITS, STOP_OUTPUT_NAME]
# Make the batch generator. Don't shuffle so we have consistent results.
data_generator = dataset.minibatch_generator(series=series,
batch_size=BATCH_SIZE,
metadata=model.metadata,
should_shuffle=False)
for batch_num, batch in enumerate(data_generator):
# Compute the predicted log probabilities
feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
model_results = model.execute(feed_dict, ops=ops)
# Concatenate logits into a [B, L, C] array (logit_ops is already ordered by level).
# For reference, L is the number of levels and C is the number of classes
logits = model_results[LOGITS]
stop_output = model_results[STOP_OUTPUT_NAME] # [B, L]
stop_probs.append(stop_output)
# Compute the predictions for each level
level_pred = np.argmax(logits, axis=-1) # [B, L]
level_predictions.append(level_pred)
labels.append(np.array(batch[OUTPUT]).reshape(-1, 1))
# Save results as attributes
level_predictions = np.concatenate(level_predictions, axis=0)
labels = np.concatenate(labels, axis=0) # [N, 1]
stop_probs = np.concatenate(stop_probs, axis=0)
level_accuracy = np.average(np.isclose(level_predictions, labels).astype(float), axis=0)
return ModelResults(predictions=level_predictions, labels=labels, stop_probs=stop_probs, accuracy=level_accuracy)
def execute_standard_model(model: StandardModel, dataset: Dataset, series: DataSeries) -> ModelResults:
"""
Executes the neural network on the given data series. We do this in a separate step
to avoid recomputing for multiple budgets. Executing the neural network is relatively expensive.
Args:
model: The standard model used to perform inference
dataset: The dataset to perform inference on
series: The data series to extract. This is usually the TEST set.
Returns:
A model result tuple containing the inference results.
"""
level_predictions: List[np.ndarray] = []
labels: List[np.ndarray] = []
# Make the batch generator. Don't shuffle so we have consistent results.
data_generator = dataset.minibatch_generator(series=series,
batch_size=BATCH_SIZE,
metadata=model.metadata,
should_shuffle=False)
for batch_num, batch in enumerate(data_generator):
# Compute the predicted log probabilities
feed_dict = model.batch_to_feed_dict(batch, is_train=False, epoch_num=0)
model_results = model.execute(feed_dict, [LOGITS])
# Compute the predictions for each level
level_pred = np.argmax(model_results[LOGITS], axis=-1) # [B, L]
level_predictions.append(level_pred)
        labels.append(np.array(batch[OUTPUT]).reshape(-1, 1))  # reshape reconstructed from execute_adaptive_model above
import numpy as np
import flopy
import flopy.utils.binaryfile as bf
import time
#################################################################################
# Setting up flopy model
#################################################################################
def model_confined(dt,
wave,
task_name = 'wave_in_aquifer_confined',
task_root = './',
exe_name = "mf2005",
Lx = 10000, # x domain length [m]
Ly = 10, # y domain length [m]
ztop = 10, # Top level [m]
zbot = 0.0, # Bottom level [m]
nlay = 1, # Number of layers
nrow = 1, # Number of rows
ncol = 15000, # Number of columns
hk = 25, # Horizontal conductivity
ss = 5e-05, # Specific storage [m-1]
dx_max = 2,
**settings):
modelname = '{}{}'.format(task_root,task_name)
### Start timer
start = time.time()
### Calculate x series
xi= np.logspace(0,np.log10(Lx),ncol+1)-1
xi[ncol] += 1
delr = np.diff(xi) # Voxel length x direction
delc= Ly / nrow # Voxel length y direction
ix_max = np.argmax(delr>dx_max)
### Define the Stress Periods
nper = len(wave) # Number of stress periods
### Create grid
mf = flopy.modflow.Modflow(modelname, exe_name=exe_name)
dis = flopy.modflow.ModflowDis(
mf,
nlay,
nrow,
ncol,
delr=delr,
delc=delc,
top=ztop,
botm=np.linspace(ztop, zbot, nlay + 1)[1:],
nper=nper,
perlen=dt*np.ones(nper), # Stress period lengths,
nstp= np.ones((nper), dtype=np.int32), # Number of timesteps for each stress period
steady= np.zeros((nper), dtype=bool),
)
gridx = dis.get_node_coordinates()[1]
headstart = ztop - min(wave-np.mean(wave)) + 1
### Assign boundary conditions
flopy.modflow.ModflowBas(
mf,
ibound=np.ones((nlay, nrow, ncol), dtype=np.int32),
strt=headstart * np.ones((nlay, nrow, ncol), dtype=np.float32),
)
### Assign hydraulic parameters
flopy.modflow.ModflowLpf(
mf,
hk=hk,
vka=hk,
ss=ss,
ipakcb=53,
)
### Load Modflow solver
flopy.modflow.ModflowPcg(mf)
### Define stress period data
stress_period_data = {}
for i in range(nper):
stageleft = wave[i] + headstart
condleft = 10000
stress_period_data[i] = [0, 0, 0, stageleft, condleft]
### Load stress period data
flopy.modflow.ModflowGhb(
mf,
stress_period_data=stress_period_data,
)
### Define output
stress_period_out = {}
for kper in range(nper):
stress_period_out[(kper, 0)] = [
"save head",
"print head",
]
### Load output file
flopy.modflow.ModflowOc(
mf,
stress_period_data=stress_period_out,
compact=True,
)
### Write model input files
mf.write_input()
### Run model
success, mfoutput = mf.run_model(silent=True, pause=False)
if not success:
raise Exception("MODFLOW did not terminate normally.")
### Create the headfile output objects
headobj = bf.HeadFile(modelname + ".hds")
times = headobj.get_times() # end time points of stress periods
### Create hydraulic head array
head_matrix = np.ones(shape=(nper,ix_max))
### Extract head files in a readable form
for i in range(nper):
head_matrix[i,:] = headobj.get_data(totim=times[i])[0][0,0:ix_max] - headstart
### stop timer
end = time.time()
### total time taken
print("Simulation finished successfully")
print(f" Runtime [s]: {end - start}")
return([times,gridx[0:ix_max],head_matrix])
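# Illustrative call sketch (not part of the original module). Assumes the mf2005
# executable is on the PATH; the wave values are hypothetical hourly heads [m].
def _model_confined_example():
    t = np.arange(48)
    wave = 0.5 * np.sin(2 * np.pi * t / 24.0)
    times, gridx, heads = model_confined(dt=3600.0, wave=wave, task_name='demo')
    return times, gridx, heads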
def model_leakage(dt,
wave,
task_name = 'wave_in_aquifer_leakage',
task_root = './',
exe_name = "mf2005",
Lx=10000,
Ly=10,
zbot=0.0,
nlay=3,
nrow=1,
ncol=15000,
thickness_lay1 = 10,
                  thickness_lay2 = 1, # needs to be one to be consistent with the leakage coefficient
thickness_lay3 = 10,
hk_unconfined = 25,
hk_confining = 0.01,
hk_confined = 25,
ss_confined = 5e-5,
ss_unconfined = 0.025,
fluctuation_unconfined_aquifer=False, ### regulating BC in unconfined aquifer
dx_max = 2,
**settings):
modelname = '{}{}'.format(task_root,task_name)
### Start timer
start = time.time()
### Calculate x series
xi= np.logspace(0,np.log10(Lx),ncol+1)-1
xi[ncol] += 1
delr = np.diff(xi) # Voxel length x direction
delc= Ly / nrow # Voxel length y direction
ix_max = np.argmax(delr>dx_max)
### Define the Stress Periods
nper = len(wave)
ztop = thickness_lay1 + thickness_lay2 + thickness_lay3
### Create grid
mf = flopy.modflow.Modflow(modelname, exe_name=exe_name)
dis = flopy.modflow.ModflowDis(
mf,
nlay,
nrow,
ncol,
delr=delr,
delc=delc,
top=ztop,
botm=np.array([thickness_lay3+thickness_lay2,thickness_lay3,0.0]),
nper=nper,
perlen=dt*np.ones(nper), # Number of stress periods
nstp=np.ones((nper), dtype=np.int32), # Number of timesteps for each stress period
steady = np.zeros((nper), dtype=bool), # Stress period lengths
)
gridx = dis.get_node_coordinates()[1]
### Assign boundary conditions
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
if fluctuation_unconfined_aquifer is False:
ibound[0,:,:] = -1
headstart = ztop - min(wave-np.mean(wave)) + 1
flopy.modflow.ModflowBas(mf,
ibound=ibound,
strt = headstart * np.ones((nlay, nrow, ncol), dtype=np.float32),
)
### Assign hydraulic conductivities
hk = np.ones((nlay,nrow,ncol))
hk[0,:,:] = hk_unconfined
hk[1,:,:] = hk_confining
hk[2,:,:] = hk_confined
vka = hk
    ss = np.ones((nlay,nrow,ncol))
"""
Class definition for the Branch_and_Bound subdriver.
This pseudo-driver can only be run when plugged into the AMIEGO driver's minlp slot.
This is the branch and bound algorithm that maximizes the constrained
expected improvement function and returns an integer infill point. The
algorithm uses the relaxation techniques proposed by Jones et.al. on their
paper on EGO,1998. This enables the algorithm to use any gradient-based
approach to obtain a global solution. Also, to satisfy the integer
constraints, a new branching scheme has been implemented.
Developed by <NAME>
School of Aeronautics & Astronautics
Purdue University, West Lafayette, IN 47906
July, 2016
Implemented in OpenMDAO, Aug 2016, <NAME>
"""
from collections import OrderedDict
import os
from time import time
import numpy as np
from scipy.special import erf
from pyDOE2 import lhs
from openmdao.core.driver import Driver
from openmdao.drivers.genetic_algorithm_driver import GeneticAlgorithm
from openmdao.utils.concurrent import concurrent_eval, concurrent_eval_lb
from openmdao.utils.general_utils import set_pyoptsparse_opt
from amiego.optimize_function import snopt_opt
# check that pyoptsparse is installed
# if it is, try to use SNOPT but fall back to SLSQP
_, OPTIMIZER = set_pyoptsparse_opt('SNOPT')
class Branch_and_Bound(Driver):
"""
Class definition for the Branch_and_Bound driver.
This pseudo-driver can only be run when plugged into the AMIEGO driver's minlp slot.
This is the branch and bound algorithm that maximizes the constrained
expected improvement function and returns an integer infill point. The
    algorithm uses the relaxation techniques proposed by Jones et al. in
    their 1998 paper on EGO. This enables the algorithm to use any
gradient-based approach to obtain a global solution. Also, to satisfy the
integer constraints, a new branching scheme has been implemented.
Attributes
----------
dvs : list
Cache of integer design variable names.
eflag_MINLPBB : bool
This is set to True when we find a local minimum.
fopt : ndarray
Objective value at optimal design.
obj_surrogate : <AMIEGOKrigingSurrogate>
Surrogate model of the objective as a function of the integer design vars.
xI_lb : ndarray
Lower bound of the integer design variables.
xI_ub : ndarray
Upper bound of the integer design variables.
xopt : ndarray
Optimal design.
_randomstate : np.random.RandomState, int
Random state (or seed-number) which controls the seed and random draws.
"""
def __init__(self):
"""
Initialize the Branch_and_Bound driver.
"""
super(Branch_and_Bound, self).__init__()
# What we support
self.supports['inequality_constraints'] = True
self.supports['equality_constraints'] = False
self.supports['multiple_objectives'] = False
self.supports['two_sided_constraints'] = False
self.supports['active_set'] = False
self.supports['linear_constraints'] = False
self.supports['gradients'] = False
self.supports['integer_design_vars'] = True
self.dvs = []
self.i_idx_cache = {}
self.obj_surrogate = None
# We will set this to True if we have found a minimum.
self.eflag_MINLPBB = False
# Amiego retrieves optimal design and optimum upon completion.
self.xopt = None
self.fopt = None
# Experimental Options. TODO: could go into Options
self.load_balance = True
self.aggressive_splitting = False
# Random state can be set for predictability during testing
if 'SimpleGADriver_seed' in os.environ:
self._randomstate = int(os.environ['SimpleGADriver_seed'])
else:
self._randomstate = None
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
opt = self.options
opt.declare('active_tol', 1.0e-6, lower=0.0,
desc='Tolerance (2-norm) for triggering active set '
'reduction.')
opt.declare('atol', 0.1, lower=0.0,
desc='Absolute tolerance (inf-norm) of upper minus '
'lower bound for termination.')
opt.declare('con_tol', 1.0e-6, lower=0.0,
desc='Constraint thickness.')
opt.declare('disp', True,
desc='Set to False to prevent printing of iteration '
'messages.')
opt.declare('ftol', 1.0e-4, lower=0.0,
desc='Absolute tolerance for sub-optimizations.')
opt.declare('maxiter', 100000, lower=0.0,
desc='Maximum number of iterations.')
opt.declare('trace_iter', 5,
desc='Number of generations to trace back for ubd.')
opt.declare('trace_iter_max', 10,
desc='Maximum number of generations to trace back for ubd.')
        opt.declare('maxiter_ubd', 10000,
                    desc='Maximum number of generations for which UBD may stay the same.')
opt.declare('local_search', 0, values=[0, 1, 2],
desc='Local search type. Set to 0 for GA, 1 for LHS, 2 for LHS + SQP '
'(Default = 0)')
def run(self):
"""
Execute the Branch_and_Bound method.
Returns
-------
boolean
Failure flag; True if failed to converge, False if successful.
"""
problem = self._problem()
obj_surrogate = self.obj_surrogate
atol = self.options['atol']
disp = self.options['disp']
maxiter = self.options['maxiter']
maxiter_ubd = self.options['maxiter_ubd']
self.iter_count = 1
self.eflag_MINLPBB = False
obj_surrogate.p = 2
obj_surrogate.y_best = np.min(obj_surrogate.Y)
# ----------------------------------------------------------------------
# Step 1: Initialize
# ----------------------------------------------------------------------
num_des = len(self.xI_lb)
node_num = 0
itercount = 0
ubd_count = 0
# Initial B&B bounds are infinite.
UBD = np.inf
LBD = -np.inf
LBD_prev = -np.inf
# Copy our desvars' user specified upper and lower bounds
xL_iter = self.xI_lb.copy()
xU_iter = self.xI_ub.copy()
num_init_sam = num_des
init_sam = lhs(num_des, samples=num_init_sam, criterion='center',
random_state=self._randomstate)
for ii in range(num_init_sam):
xopt_ii = np.round(xL_iter + init_sam[ii] * (xU_iter - xL_iter)).reshape(num_des)
fopt_ii = self.objective_callback(xopt_ii)
if fopt_ii < UBD:
self.eflag_MINLPBB = True
UBD = fopt_ii
fopt = fopt_ii
xopt = xopt_ii
# This stuff is just for printing.
par_node = 0
# Active set fields: (Updated!)
# Aset = [[NodeNumber, lb, ub, LBD, UBD, nodeHist], [], ..]
active_set = []
nodeHist = NodeHist()
UBD_term = UBD
comm = problem.model.comm
if self.load_balance:
# Master/Worker config
n_proc = comm.size - 1
if n_proc < 2:
comm = None
n_proc = 1
else:
# Each proc has its own jobs
n_proc = comm.size
if n_proc < 2:
comm = None
# Initial node. This is the data structure we pass into the concurrent evaluator.
if self.aggressive_splitting:
# Initial number of nodes based on number of available procs
args = init_nodes(n_proc, xL_iter, xU_iter, par_node, LBD_prev, LBD,
UBD, fopt, xopt, nodeHist, ubd_count)
else:
# Start with 1 node.
args = [(xL_iter, xU_iter, par_node, LBD_prev, LBD, UBD, fopt,
xopt, node_num, nodeHist, ubd_count)]
# Main Loop
terminate = False
while not terminate:
# Branch and Bound evaluation of a set of nodes, starting with the initial one.
# When executed in serial, only a single node is evaluted.
cases = [(arg, None) for arg in args]
if self.load_balance:
results = concurrent_eval_lb(self.evaluate_node, cases,
comm, broadcast=True)
else:
results = concurrent_eval(self.evaluate_node, cases,
comm, allgather=True)
itercount += len(args)
if UBD < -1.0e-3:
ubd_count += len(args)
# Put all the new nodes into active set.
for result in results:
# Print the traceback if it fails
if not result[0]:
print(result[1])
new_UBD, new_fopt, new_xopt, new_nodes = result[0]
# Save stats for the best case.
if new_UBD < UBD:
UBD = new_UBD
fopt = new_fopt
xopt = new_xopt
# Look for substantial change in UBD to reset the counter
if abs(new_UBD - UBD_term) > 0.001:
ubd_count = 1
UBD_term = new_UBD
# TODO: Should we extend the active set with all the cases we
# ran, or just the best one. All for now.
active_set.extend(new_nodes)
node_num += len(new_nodes)
# Update active set: Removes all nodes worse than the best new node.
if len(active_set) >= 1:
active_set = update_active_set(active_set, UBD)
# Termination
if len(active_set) >= 1:
# Update LBD and select the current rectangle
args = []
# Grab the best nodes, as many as we have processors.
n_nodes = np.min((n_proc, len(active_set)))
for j in range(n_nodes):
# a. Set LBD as lowest in the active set
all_LBD = [item[3] for item in active_set]
LBD = min(all_LBD)
ind_LBD = all_LBD.index(LBD)
LBD_prev = LBD
# b. Select the lowest LBD node as the current node
par_node, xL_iter, xU_iter, _, _, nodeHist = active_set[ind_LBD]
self.iter_count += 1
args.append((xL_iter, xU_iter, par_node, LBD_prev, LBD, UBD, fopt,
xopt, node_num, nodeHist, ubd_count))
# c. Delete the selected node from the Active set of nodes
del active_set[ind_LBD]
# --------------------------------------------------------------
# Step 7: Check for convergence
# --------------------------------------------------------------
diff = np.abs(UBD - LBD)
if diff < atol:
terminate = True
if disp:
print("=" * 85)
print("Terminating! Absolute difference between the upper " +
"and lower bound is below the tolerence limit.")
else:
terminate = True
if disp:
print("=" * 85)
print("Terminating! No new node to explore.")
print("Max Node", node_num)
if itercount > maxiter or ubd_count > maxiter_ubd:
terminate = True
# Finalize by putting optimal value back into openMDAO
self.xopt = xopt
self.fopt = fopt
return False
def evaluate_node(self, xL_iter, xU_iter, par_node, LBD_prev, LBD, UBD, fopt, xopt, node_num,
nodeHist, ubd_count):
"""
Perform Branch and Bound step on a single node.
This function encapsulates the portion of the code that runs in parallel.
Parameters
----------
xL_iter : ndarray
Lower bound of design variables.
xU_iter : ndarray
Upper bound of design variables.
par_node : int
Index of parent node for this child node.
LBD_prev : float
Previous iteration value of LBD.
LBD : float
Current value of lower bound estimate.
UBD : float
            Current value of upper bound estimate.
fopt : float
Current best objective value
xopt : ndarray
Current best design values.
node_num : int
Index of this current node
nodeHist : <NodeHist>
Data structure containing information about this node.
ubd_count : int
Counter for number of generations.
Returns
-------
float
New upper bound estimate.
float
New best objective value.
        ndarray
New design variables.
list
List of parameters for new node.
"""
if OPTIMIZER == 'SNOPT':
options = {'Major optimality tolerance': 1.0e-5}
elif OPTIMIZER == 'SLSQP':
options = {'ACC': 1.0e-5}
elif OPTIMIZER == 'CONMIN':
options = {'DABFUN': 1.0e-5}
active_tol = self.options['active_tol']
local_search = self.options['local_search']
disp = self.options['disp']
trace_iter = self.options['trace_iter']
trace_iter_max = self.options['trace_iter_max']
obj_surrogate = self.obj_surrogate
num_des = len(self.xI_lb)
new_nodes = []
# Keep this to 0.49 to always round towards bottom-left
xloc_iter = np.round(xL_iter + 0.49 * (xU_iter - xL_iter))
floc_iter = self.objective_callback(xloc_iter)
# Genetic Algorithm
if local_search == 0:
# --------------------------------------------------------------
# Step 2: Obtain a local solution using a GA.
# --------------------------------------------------------------
ga = GeneticAlgorithm(self.obj_for_GA)
bits = np.ceil(np.log2(xU_iter - xL_iter + 1)).astype(int)
bits[bits <= 0] = 1
vub_vir = (2**bits - 1) + xL_iter
# More important nodes get a higher population size and number of generations.
if nodeHist.priority_flag == 1:
max_gen = 300
mfac = 6
else:
max_gen = 200
mfac = 4
L = np.sum(bits)
pop_size = mfac * L
t0 = time()
self.xU_iter = xU_iter
xloc_iter_new, floc_iter_new, nfit = \
ga.execute_ga(xL_iter, xL_iter, vub_vir, vub_vir, bits, pop_size, max_gen,
self._randomstate)
t_GA = time() - t0
if floc_iter_new < floc_iter:
floc_iter = floc_iter_new
xloc_iter = xloc_iter_new
# LHS Sampling or SNOPT
else:
# TODO Future research on sampling here
num_samples = np.round(np.max([10, np.min([50, num_des / nodeHist.priority_flag])]))
init_sam_node = lhs(num_des, samples=num_samples, criterion='center',
random_state=self._randomstate)
t_GA = 0.
for ii in range(int(num_samples)):
xloc_iter_new = np.round(xL_iter + init_sam_node[ii] * (xU_iter - xL_iter))
floc_iter_new = self.objective_callback(xloc_iter_new)
# SNOPT
if local_search == 2:
# TODO: did we lose a tol check here?
# active_tol: #Perform at non-flat starting point
if np.abs(floc_iter_new) > -np.inf:
# --------------------------------------------------------------
# Step 2: Obtain a local solution
# --------------------------------------------------------------
# Using a gradient-based method here.
# TODO: Make it more pluggable.
def _objcall(dv_dict):
"""
Compute objective for SNOPT.
"""
fail = 0
x = dv_dict['x']
# Objective
func_dict = {}
func_dict['obj'] = self.objective_callback(x)[0]
return func_dict, fail
xC_iter = xloc_iter_new
opt_x, opt_f, succ_flag, msg = snopt_opt(_objcall, xC_iter, xL_iter,
xU_iter, title='LocalSearch',
options=options)
xloc_iter_new = np.round(np.asarray(opt_x).flatten())
floc_iter_new = self.objective_callback(xloc_iter_new)
if floc_iter_new < floc_iter:
floc_iter = floc_iter_new
xloc_iter = xloc_iter_new
# Do some prechecks before commencing for partitioning.
ubdloc_best = nodeHist.ubdloc_best
if nodeHist.ubdloc_best > floc_iter + 1.0e-6:
ubd_track = np.concatenate((nodeHist.ubd_track, np.array([0])), axis=0)
ubdloc_best = floc_iter
else:
ubd_track = np.concatenate((nodeHist.ubd_track, np.array([1])), axis=0)
# diff_LBD = abs(LBD_prev - LBD_NegConEI)
if len(ubd_track) >= trace_iter_max or \
(len(ubd_track) >= trace_iter and np.sum(ubd_track[-trace_iter:]) == 0):
            # TODO : Did we lose this? -> #and UBD<=-1.0e-3:
child_info = np.array([[par_node, np.inf, floc_iter], [par_node, np.inf, floc_iter]])
# Fathomed due to no change in UBD_loc for 'trace_iter' generations
dis_flag = ['Y', 'Y']
else:
# --------------------------------------------------------------------------
# Step 3: Partition the current rectangle as per the new branching scheme.
# --------------------------------------------------------------------------
child_info = np.zeros([2, 3])
dis_flag = [' ', ' ']
# Choose
l_iter = (xU_iter - xL_iter).argmax()
if xloc_iter[l_iter] < xU_iter[l_iter]:
delta = 0.5 # 0<delta<1
else:
delta = -0.5 # -1<delta<0
for ii in range(2):
lb = xL_iter.copy()
ub = xU_iter.copy()
if ii == 0:
ub[l_iter] = np.floor(xloc_iter[l_iter] + delta)
elif ii == 1:
lb[l_iter] = np.ceil(xloc_iter[l_iter] + delta)
if np.linalg.norm(ub - lb) > active_tol: # Not a point
# --------------------------------------------------------------
# Step 4: Obtain an LBD of f in the newly created node
# --------------------------------------------------------------
S4_fail = False
x_comL, x_comU, Ain_hat, bin_hat = gen_coeff_bound(lb, ub, obj_surrogate)
sU, eflag_sU = self.maximize_S(x_comL, x_comU, Ain_hat, bin_hat)
if eflag_sU:
yL, eflag_yL = self.minimize_y(x_comL, x_comU, Ain_hat, bin_hat)
if eflag_yL:
NegEI = calc_conEI_norm([], obj_surrogate, SSqr=sU, y_hat=yL)
else:
S4_fail = True
else:
S4_fail = True
# Convex approximation failed!
if S4_fail:
LBD_NegConEI = LBD_prev
dis_flag[ii] = 'F'
else:
LBD_NegConEI = max(NegEI, LBD_prev)
# --------------------------------------------------------------
# Step 5: Store any new node inside the active set that has LBD
# lower than the UBD.
# --------------------------------------------------------------
priority_flag = 0
if LBD_NegConEI < np.inf and LBD_prev > -np.inf:
if np.abs((LBD_prev - LBD_NegConEI) / LBD_prev) < 0.005:
priority_flag = 1
nodeHist_new = NodeHist()
nodeHist_new.ubd_track = ubd_track
nodeHist_new.ubdloc_best = ubdloc_best
nodeHist_new.priority_flag = priority_flag
if LBD_NegConEI < UBD - 1.0e-6:
node_num += 1
new_node = [node_num, lb, ub, LBD_NegConEI, floc_iter, nodeHist_new]
new_nodes.append(new_node)
child_info[ii] = np.array([node_num, LBD_NegConEI, floc_iter])
else:
child_info[ii] = np.array([par_node, LBD_NegConEI, floc_iter])
# Flag for child created but not added to active set. (fathomed)
dis_flag[ii] = 'X'
else:
if ii == 1:
xloc_iter = ub
floc_iter = self.objective_callback(xloc_iter)
child_info[ii] = np.array([par_node, np.inf, floc_iter])
# Flag for No child created
dis_flag[ii] = 'x'
# Update the active set whenever better solution found
if floc_iter < UBD:
UBD = floc_iter
fopt = floc_iter
xopt = xloc_iter.reshape(num_des)
if disp:
if (self.iter_count - 1) % 25 == 0:
# Display output in a tabular format
print("=" * 95)
print("%19s%12s%14s%21s" % ("Global", "Parent", "Child1", "Child2"))
template = "%s%8s%10s%8s%9s%11s%10s%11s%11s%11s"
print(template % ("Iter", "LBD", "UBD", "Node", "Node1", "LBD1",
"Node2", "LBD2", "Flocal", "GA time"))
print("=" * 95)
template = "%3d%10.2f%10.2f%6d%8d%1s%13.2f%8d%1s%13.2f%9.2f%9.2f"
print(template % (self.iter_count, LBD, UBD, par_node, child_info[0, 0],
dis_flag[0], child_info[0, 1], child_info[1, 0],
dis_flag[1], child_info[1, 1], child_info[1, 2], t_GA))
return UBD, fopt, xopt, new_nodes
def objective_callback(self, xI):
"""
Evalute main problem objective at the requested point.
Objective is the expected improvement function with modifications to make it concave.
Parameters
----------
xI : ndarray
Value of design variables.
Returns
-------
float
Objective value
"""
obj_surrogate = self.obj_surrogate
# Normalized as per the convention in openmdao_Alpha:Kriging.
xval = (xI - obj_surrogate.X_mean) / obj_surrogate.X_std
NegEI = calc_conEI_norm(xval, obj_surrogate)
# print(xI, f)
return NegEI
def maximize_S(self, x_comL, x_comU, Ain_hat, bin_hat):
"""
Maximize the SigmaSqr Error.
        This method finds an upper bound to the SigmaSqr Error, and scales up 'r' to provide a
        smooth design space for a gradient-based approach.
Parameters
----------
x_comL : ndarray
Full lower bounds vector
x_comU : ndarray
Full upper bounds vector.
Ain_hat : ndarray
Matrix Ain_hat for linear model of constraints.
bin_hat : ndarray
Vector bin_hat for linear model of constraints.
Returns
-------
float
Maximized upper bound for sigma squared error.
bool
Success flag True if successful.
"""
if OPTIMIZER == 'SNOPT':
options = {'Major optimality tolerance': 1.0e-5}
elif OPTIMIZER == 'SLSQP':
options = {'ACC': 1.0e-5}
elif OPTIMIZER == 'CONMIN':
options = {'DABFUN': 1.0e-5}
surrogate = self.obj_surrogate
R_inv = surrogate.R_inv
SigmaSqr = surrogate.SigmaSqr
X = surrogate.X
n, k = X.shape
one = np.ones([n, 1])
xhat_comL = x_comL.copy()
xhat_comU = x_comU.copy()
xhat_comL[k:] = 0.0
xhat_comU[k:] = 1.0
# Calculate the convexity factor alpha
rL = x_comL[k:]
rU = x_comU[k:]
dr_drhat = np.diag(rU[:, 0] - rL[:, 0])
T2_num = np.dot(np.dot(R_inv, one), np.dot(R_inv, one).T)
T2_den = np.dot(one.T, np.dot(R_inv, one))
d2S_dr2 = 2.0 * SigmaSqr * (R_inv - (T2_num / T2_den))
H_hat = np.dot(np.dot(dr_drhat, d2S_dr2), dr_drhat)
# Use Gershgorin's circle theorem to find a lower bound of the
# min eigen value of the hessian
eig_lb = np.zeros([n, 1])
for ii in range(n):
dia_ele = H_hat[ii, ii]
sum_rw = 0.0
sum_col = 0.0
for jj in range(n):
if ii != jj:
sum_rw += np.abs(H_hat[ii, jj])
sum_col += np.abs(H_hat[jj, ii])
eig_lb[ii] = dia_ele - np.min(np.array([sum_rw, sum_col]))
eig_min = np.min(eig_lb)
alpha = np.max(np.array([0.0, -0.5 * eig_min]))
# Maximize S
x0 = 0.5 * (xhat_comL + xhat_comU)
# Just storing stuff here to pull it out in the callback.
surrogate._alpha = alpha
self.x_comL = x_comL
self.x_comU = x_comU
self.xhat_comL = xhat_comL
self.xhat_comU = xhat_comU
self.Ain_hat = Ain_hat
self.bin_hat = bin_hat
opt_x, opt_f, succ_flag, msg = snopt_opt(self.calc_SSqr_convex, x0, xhat_comL,
xhat_comU, ncon=len(bin_hat),
title='Maximize_S',
options=options,
jac=Ain_hat,
sens=self.calc_SSqr_convex_grad)
Neg_sU = opt_f
# if not succ_flag:
# eflag_sU = False
# else:
# eflag_sU = True
eflag_sU = True
tol = self.options['con_tol']
for ii in range(2 * n):
if np.dot(Ain_hat[ii, :], opt_x) > (bin_hat[ii] + tol):
eflag_sU = False
break
sU = - Neg_sU
return sU, eflag_sU
def calc_SSqr_convex(self, dv_dict):
"""
Callback function for minimization of mean squared error.
Parameters
----------
dv_dict : dict
Dictionary of design variable values.
Returns
-------
func_dict : dict
Dictionary of all functional variables evaluated at design point.
fail : int
0 for successful function evaluation
1 for unsuccessful function evaluation
"""
fail = 0
x_com = dv_dict['x']
surrogate = self.obj_surrogate
R_inv = surrogate.R_inv
SigmaSqr = surrogate.SigmaSqr
alpha = surrogate._alpha
n, k = surrogate.X.shape
one = np.ones([n, 1])
rL = self.x_comL[k:]
rU = self.x_comU[k:]
rhat = x_com[k:].reshape(n, 1)
r = rL + rhat * (rU - rL)
rhat_L = self.xhat_comL[k:]
rhat_U = self.xhat_comU[k:]
term0 = np.dot(R_inv, r)
term1 = -SigmaSqr * (1.0 - r.T.dot(term0) +
((1.0 - one.T.dot(term0))**2 / (one.T.dot(np.dot(R_inv, one)))))
term2 = alpha * (rhat - rhat_L).T.dot(rhat - rhat_U)
S2 = term1 + term2
# Objectives
func_dict = {}
func_dict['obj'] = S2[0, 0]
# Constraints
Ain_hat = self.Ain_hat
bin_hat = self.bin_hat
func_dict['con'] = np.dot(Ain_hat, x_com) - bin_hat
# print('x', dv_dict)
# print('obj', func_dict['obj'])
return func_dict, fail
def calc_SSqr_convex_grad(self, dv_dict, func_dict):
"""
Callback function for gradient of mean squared error.
Parameters
----------
dv_dict : dict
Dictionary of design variable values.
func_dict : dict
Dictionary of all functional variables evaluated at design point.
Returns
-------
sens_dict : dict
Dictionary of dictionaries for gradient of each dv/func pair
fail : int
0 for successful function evaluation
1 for unsuccessful function evaluation
"""
fail = 0
x_com = dv_dict['x']
surrogate = self.obj_surrogate
X = surrogate.X
R_inv = surrogate.R_inv
SigmaSqr = surrogate.SigmaSqr
alpha = surrogate._alpha
n, k = X.shape
nn = len(x_com)
one = np.ones([n, 1])
rL = self.x_comL[k:]
rU = self.x_comU[k:]
rhat = x_com[k:].reshape(n, 1)
r = rL + rhat * (rU - rL)
rhat_L = self.xhat_comL[k:]
rhat_U = self.xhat_comU[k:]
dr_drhat = np.diag((rU - rL).flat)
term0 = np.dot(R_inv, r)
term1 = ((1.0 - one.T.dot(term0)) / (one.T.dot(np.dot(R_inv, one)))) * np.dot(R_inv, one)
term = 2.0 * SigmaSqr * (term0 + term1)
dterm1 = np.dot(dr_drhat, term)
dterm2 = alpha * (2.0 * rhat - rhat_L - rhat_U)
dobj_dr = (dterm1 + dterm2).T
# Objectives
sens_dict = OrderedDict()
sens_dict['obj'] = OrderedDict()
sens_dict['obj']['x'] = np.zeros((1, nn))
sens_dict['obj']['x'][:, k:] = dobj_dr
# Constraints
Ain_hat = self.Ain_hat
sens_dict['con'] = OrderedDict()
sens_dict['con']['x'] = Ain_hat
# print('obj deriv', sens_dict['obj']['x'] )
# print('con deriv', sens_dict['con']['x'])
return sens_dict, fail
def minimize_y(self, x_comL, x_comU, Ain_hat, bin_hat):
"""
Minimize the lower bound.
Parameters
----------
x_comL : ndarray
Full lower bounds vector
x_comU : ndarray
Full upper bounds vector.
Ain_hat : ndarray
Matrix Ain_hat for linear model of constraints.
bin_hat : ndarray
Vector bin_hat for linear model of constraints.
Returns
-------
        float
            Minimized lower bound of the surrogate model prediction y_hat.
bool
Success flag True if successful.
"""
if OPTIMIZER == 'SNOPT':
options = {'Major optimality tolerance': 1.0e-8}
elif OPTIMIZER == 'SLSQP':
options = {'ACC': 1.0e-8}
elif OPTIMIZER == 'CONMIN':
options = {'DABFUN': 1.0e-8}
# 1- Formulates y_hat as LP (weaker bound)
# 2- Uses non-convex relaxation technique (stronger bound) [Future release]
app = 1
surrogate = self.obj_surrogate
X = surrogate.X
n, k = X.shape
xhat_comL = x_comL.copy()
xhat_comU = x_comU.copy()
xhat_comL[k:] = 0.0
xhat_comU[k:] = 1.0
if app == 1:
x0 = 0.5 * (xhat_comL + xhat_comU)
# Just storing stuff here to pull it out in the callback.
self.x_comL = x_comL
self.x_comU = x_comU
self.Ain_hat = Ain_hat
self.bin_hat = bin_hat
opt_x, opt_f, succ_flag, msg = snopt_opt(self.calc_y_hat_convex, x0, xhat_comL,
xhat_comU, ncon=len(bin_hat),
title='minimize_y',
options=options,
jac=Ain_hat,
sens=self.calc_y_hat_convex_grad)
yL = opt_f
# if not succ_flag:
# eflag_yL = False
# else:
# eflag_yL = True
eflag_yL = True
tol = self.options['con_tol']
for ii in range(2 * n):
if np.dot(Ain_hat[ii, :], opt_x) > (bin_hat[ii] + tol):
eflag_yL = False
break
return yL, eflag_yL
def calc_y_hat_convex(self, dv_dict):
"""
Callback function for objective during minimization of y_hat.
Parameters
----------
dv_dict : dict
Dictionary of design variable values.
Returns
-------
func_dict : dict
Dictionary of all functional variables evaluated at design point.
fail : int
0 for successful function evaluation
1 for unsuccessful function evaluation
"""
fail = 0
x_com = dv_dict['x']
surrogate = self.obj_surrogate
X = surrogate.X
c_r = surrogate.c_r
mu = surrogate.mu
n, k = X.shape
rL = self.x_comL[k:]
rU = self.x_comU[k:]
rhat = np.array([x_com[k:]]).reshape(n, 1)
r = rL + rhat * (rU - rL)
y_hat = mu + np.dot(r.T, c_r)
# Objective
func_dict = {}
func_dict['obj'] = y_hat[0, 0]
# Constraints
Ain_hat = self.Ain_hat
bin_hat = self.bin_hat
func_dict['con'] = np.dot(Ain_hat, x_com) - bin_hat
# print('x', dv_dict)
# print('obj', func_dict['obj'])
return func_dict, fail
def calc_y_hat_convex_grad(self, dv_dict, func_dict):
"""
Callback function for gradient during minimization of y_hat.
Parameters
----------
dv_dict : dict
Dictionary of design variable values.
func_dict : dict
Dictionary of all functional variables evaluated at design point.
Returns
-------
sens_dict : dict
Dictionary of dictionaries for gradient of each dv/func pair
fail : int
0 for successful function evaluation
1 for unsuccessful function evaluation
"""
fail = 0
x_com = dv_dict['x']
surrogate = self.obj_surrogate
X = surrogate.X
c_r = surrogate.c_r
n, k = X.shape
nn = len(x_com)
rL = self.x_comL[k:]
rU = self.x_comU[k:]
dobj_dr = c_r * (rU - rL)
# Objectives
sens_dict = OrderedDict()
sens_dict['obj'] = OrderedDict()
sens_dict['obj']['x'] = np.zeros((1, nn))
sens_dict['obj']['x'][:, k:] = dobj_dr.T
# Constraints
Ain_hat = self.Ain_hat
sens_dict['con'] = OrderedDict()
sens_dict['con']['x'] = Ain_hat
# print('obj deriv', sens_dict['obj']['x'] )
# print('con deriv', sens_dict['con']['x'])
return sens_dict, fail
def obj_for_GA(self, x, icase):
"""
        Evaluate the main problem objective at the requested point.
Objective is the expected improvement function with modifications to make it concave.
Parameters
----------
x : ndarray
Value of design variables.
icase : int
Case number, used for identification when run in parallel.
Returns
-------
float
Objective value
bool
Success flag, True if successful
int
Case number, used for identification when run in parallel.
"""
surrogate = self.obj_surrogate
xU_iter = self.xU_iter
num_des = len(x)
P = 0.0
rp = 100.0
g = x / xU_iter - 1.0
        idx = np.where(g > 0.0)[0]
        if len(idx) > 0:
P = np.einsum('i->', g[idx]**2)
xval = (x - surrogate.X_mean) / surrogate.X_std
NegEI = calc_conEI_norm(xval, surrogate)
f = NegEI + rp * P
return f, True, icase
def update_active_set(active_set, ubd):
"""
Update the active set.
Remove variables from the active set data structure if their current upper bound exceeds the
given value.
Parameters
----------
active_set : list of lists of floats
Active set data structure of form [[NodeNumber, lb, ub, LBD, UBD], [], ..]
ubd : float
Maximum for bounds test.
Returns
-------
list of list of floats
New active_set
"""
return [a for a in active_set if a[3] < ubd]
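# Minimal usage sketch (hypothetical numbers, not part of the original
# driver): prune a two-node active set against an incumbent upper bound of
# 2.0. Entries follow the [NodeNumber, lb, ub, LBD, UBD] layout documented
# above; only the node with LBD < 2.0 survives.
def _demo_update_active_set():
    active_set = [[1, 0.0, 1.0, 1.5, 3.0],
                  [2, 0.0, 1.0, 2.5, 3.0]]
    return update_active_set(active_set, 2.0)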
def gen_coeff_bound(xI_lb, xI_ub, surrogate):
"""
Generate upper and lower bounds for r.
This function generates the upper and lower bound of the artificial
variable r and the coefficients for the linearized under estimator
constraints. The version accepts design bound in the original design
space, converts it to normalized design space.
Parameters
----------
xI_lb : ndarray
Lower bound of the integer design variables.
xI_ub : ndarray
Upper bound of the integer design variables.
surrogate : <AMIEGOKrigingSurrogate>
Surrogate model of optimized objective with respect to integer design variables.
Returns
-------
ndarray
Full lower bounds vector
ndarray
Full upper bounds vector.
ndarray
Matrix Ain_hat for linear model of constraints.
ndarray
Vector bin_hat for linear model of constraints.
"""
mean = surrogate.X_mean
std = surrogate.X_std
# Normalized as per Openmdao kriging model
xL_hat = (xI_lb - mean) / std
xU_hat = (xI_ub - mean) / std
rL, rU = interval_analysis(xL_hat, xU_hat, surrogate)
    # Combined design variables for subproblem
num = len(xL_hat) + len(rL)
x_comL = np.append(xL_hat, rL).reshape(num, 1)
x_comU = np.append(xU_hat, rU).reshape(num, 1)
# Coefficients of the linearized constraints of the subproblem
Ain_hat, bin_hat = lin_underestimator(x_comL, x_comU, surrogate)
return x_comL, x_comU, Ain_hat, bin_hat
def interval_analysis(lb_x, ub_x, surrogate):
"""
Predict lower and upper bounds for r.
    The module predicts the lower and upper bounds of the artificial variable 'r' from the bounds
    of the design variable x. r is related to x by the following equation:
r_i = exp(-sum(theta_h*(x_h - x_h_i)^2))
Parameters
----------
lb_x : ndarray
Lower bound of the integer design variables.
ub_x : ndarray
Upper bound of the integer design variables.
surrogate : <AMIEGOKrigingSurrogate>
Surrogate model of optimized objective with respect to integer design variables.
Returns
-------
ndarray
Predicted lower bound for r
ndarray
Predicted upper bound for r
"""
p = surrogate.p
if p % 2 == 0:
X = surrogate.X
thetas = surrogate.thetas
n, k = X.shape
t3L = np.empty([n, k])
t3U = np.empty([n, k])
t1L = lb_x - X
t1U = ub_x - X
fac1 = t1L * t1L
fac2 = t1L * t1U
fac3 = t1U * t1U
for i in range(n):
for h in range(k):
fact = np.array([fac1[i, h], fac2[i, h], fac3[i, h]])
t2L = np.max(np.array([0, np.min(fact)]))
t2U = np.max(np.array([0, np.max(fact)]))
fact = -thetas[h] * np.array([t2L, t2U])
t3L[i, h] = np.min(fact)
t3U[i, h] = np.max(fact)
lb_r = np.exp(np.sum(t3L, axis=1))
ub_r = np.exp(np.sum(t3U, axis=1))
    else:
        print("\nWarning! Value of p should be 2. Cannot perform interval analysis")
        print("\nReturning global bounds of the r variable")
        # r = exp(-sum(theta_h*(x_h - x_h_i)^2)) always lies in (0, 1].
        n = surrogate.X.shape[0]
        lb_r = np.zeros(n)
        ub_r = np.ones(n)
return lb_r, ub_r
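# Minimal sketch of interval_analysis on a toy surrogate. Assumption: only
# the .p, .X and .thetas attributes of <AMIEGOKrigingSurrogate> are needed
# here, so a SimpleNamespace stands in; all numbers are made up.
def _demo_interval_analysis():
    import types
    toy = types.SimpleNamespace(p=2,
                                X=np.array([[0.0], [1.0]]),
                                thetas=np.array([1.0]))
    lb_r, ub_r = interval_analysis(np.array([-0.5]), np.array([0.5]), toy)
    # r = exp(-theta*(x - x_i)^2) lies in (0, 1], so ub_r should be <= 1.
    return lb_r, ub_r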
def lin_underestimator(lb, ub, surrogate):
"""
Compute the coefficients of the linearized underestimator constraints.
Parameters
----------
lb : ndarray
Lower bound vector.
ub : ndarray
Upper bound vector
surrogate : <AMIEGOKrigingSurrogate>
Surrogate model of optimized objective with respect to integer design variables.
Returns
-------
ndarray
Matrix Ain_hat for linear model of constraints.
ndarray
Vector bin_hat for linear model of constraints.
"""
X = surrogate.X
thetas = surrogate.thetas
p = surrogate.p
n, k = X.shape
lb_x = lb[:k]
ub_x = ub[:k]
lb_r = lb[k:]
ub_r = ub[k:]
a1_hat = np.zeros([n, n])
a3_hat = np.zeros([n, n])
a2 = np.empty([n, k])
a4 = np.empty([n, k])
b2 = np.empty([n, k])
b4 = np.empty([n, k])
b1_hat = np.empty([n, ])
b3_hat = np.empty([n, ])
dist_r = ub_r - lb_r
dist_x = ub_x - lb_x
x_m = 0.5 * (ub_x + lb_x)
r_m = 0.5 * (lb_r + ub_r)
ub_fact = (ub_x - X.T)**p
lb_fact = (lb_x - X.T)**p
fact_p = (x_m - X.T)**p
fact_pm1 = (x_m - X.T)**(p - 1)
for i in range(n):
# T1: Linearize under-estimator of ln[r_i] = a1*r[i] + b1
if ub_r[i] <= lb_r[i]:
a1 = 0.0
else:
            a1 = (np.log(ub_r[i]) - np.log(lb_r[i])) / (ub_r[i] - lb_r[i])
import sys
import warnings
import re
import xml.etree.ElementTree
import io
import uuid
import struct
import pathlib
import jnius_config
import numpy as np
import scipy.spatial.distance
import scipy.fft
import skimage.util
import skimage.util.dtype
import skimage.io
import skimage.exposure
import skimage.transform
import sklearn.linear_model
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.cm as mcm
import matplotlib.patches as mpatches
import matplotlib.patheffects as mpatheffects
from . import utils
from . import thumbnail
from . import __version__ as _version
if not jnius_config.vm_running:
pkg_root = pathlib.Path(__file__).parent.resolve()
bf_jar_path = pkg_root / 'jars' / 'loci_tools.jar'
if not bf_jar_path.exists():
raise RuntimeError("loci_tools.jar missing from distribution"
" (expected it at %s)" % bf_jar_path)
jnius_config.add_classpath(str(bf_jar_path))
import jnius
DebugTools = jnius.autoclass('loci.common.DebugTools')
IFormatReader = jnius.autoclass('loci.formats.IFormatReader')
MetadataRetrieve = jnius.autoclass('ome.xml.meta.MetadataRetrieve')
ServiceFactory = jnius.autoclass('loci.common.services.ServiceFactory')
OMEXMLService = jnius.autoclass('loci.formats.services.OMEXMLService')
ChannelSeparator = jnius.autoclass('loci.formats.ChannelSeparator')
DynamicMetadataOptions = jnius.autoclass('loci.formats.in.DynamicMetadataOptions')
UNITS = jnius.autoclass('ome.units.UNITS')
DebugTools.enableLogging("ERROR")
# TODO:
# - Write tables with summary information about alignments.
class Metadata(object):
@property
def _num_images(self):
raise NotImplementedError
@property
def num_channels(self):
raise NotImplementedError
@property
def pixel_size(self):
raise NotImplementedError
@property
def pixel_dtype(self):
raise NotImplementedError
def tile_position(self, i):
raise NotImplementedError
def tile_size(self, i):
raise NotImplementedError
@property
def grid_dimensions(self):
pos = self.positions
shape = np.array([len(set(pos[:, d])) for d in range(2)])
if np.prod(shape) != self.num_images:
raise ValueError("Series positions do not form a grid")
return shape
@property
def num_images(self):
return self._num_images
@property
def positions(self):
if not hasattr(self, '_positions'):
self._positions = np.vstack([
self.tile_position(i) for i in range(self._num_images)
])
return self._positions
@property
def size(self):
if not hasattr(self, '_size'):
s0 = self.tile_size(0)
image_ids = range(1, self._num_images)
if any(any(self.tile_size(i) != s0) for i in image_ids):
raise ValueError("Image series must all have the same dimensions")
self._size = s0
return self._size
@property
def centers(self):
return self.positions + self.size / 2
@property
def origin(self):
return self.positions.min(axis=0)
class PlateMetadata(Metadata):
def __init__(self):
super(PlateMetadata, self).__init__()
self.set_active_plate_well(None, None)
@property
def num_plates(self):
raise NotImplementedError
@property
def num_wells(self):
raise NotImplementedError
@property
def plate_well_series(self):
raise NotImplementedError
def plate_name(self, i):
raise NotImplementedError
def well_name(self, plate, i):
raise NotImplementedError
def set_active_plate_well(self, plate, well):
if (plate is None) ^ (well is None):
raise ValueError("plate and well must be both set or both None")
self.active_plate = plate
self.active_well = well
@property
def active_series(self):
if self.active_plate is None:
return range(self._num_images)
else:
return self.plate_well_series[self.active_plate][self.active_well]
@property
def plate_names(self):
if not hasattr(self, '_plate_names'):
self._plate_names = [
self.plate_name(i) for i in range(self.num_plates)
]
return self._plate_names
@property
def well_names(self):
if not hasattr(self, '_well_names'):
self._well_names = [
[self.well_name(p, i) for i in range(num_plate_wells)]
for p, num_plate_wells in enumerate(self.num_wells)
]
return self._well_names
@Metadata.num_images.getter
def num_images(self):
return len(self.active_series)
@Metadata.positions.getter
def positions(self):
return Metadata.positions.fget(self)[self.active_series]
    # FIXME Metadata.grid_dimensions should be overridden here or removed.
class Reader(object):
def read(self, series, c):
raise NotImplementedError
class PlateReader(Reader):
# No API here, just a way to signal that a subclass's metadata class
# inherits from PlateMetadata. This is probably a sign that the
# architectural split between Metadata and Reader should be reconsidered.
pass
class BioformatsMetadata(PlateMetadata):
_pixel_dtypes = {
'uint8': np.dtype(np.uint8),
'uint16': np.dtype(np.uint16),
}
_ome_dtypes = {v: k for k, v in _pixel_dtypes.items()}
def __init__(self, path):
super(BioformatsMetadata, self).__init__()
self.path = path
self._init_metadata()
def __getstate__(self):
state = self.__dict__.copy()
del state['_reader'], state['_metadata'], state['_omexml_root']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._init_metadata()
def _init_metadata(self):
factory = ServiceFactory()
service = jnius.cast(OMEXMLService, factory.getInstance(OMEXMLService))
metadata = service.createOMEXMLMetadata()
self._reader = ChannelSeparator()
self._reader.setMetadataStore(metadata)
# For multi-scene .CZI files, we need raw tiles instead of the
# auto-stitched mosaic and we don't want labels or overview images
options = DynamicMetadataOptions()
options.setBoolean('zeissczi.autostitch', False)
options.setBoolean('zeissczi.attachments', False)
self._reader.setMetadataOptions(options)
self._reader.setId(self.path)
xml_content = service.getOMEXML(metadata)
self._metadata = jnius.cast(MetadataRetrieve, metadata)
self._omexml_root = xml.etree.ElementTree.fromstring(xml_content)
self.format_name = self._reader.getFormat()
@property
def _num_images(self):
count = self._metadata.imageCount
# Skip final overview slide in Metamorph Slide Scan data if present.
if (self.format_name == 'Metamorph STK'
and 'overview' in self._metadata.getImageName(count - 1).lower()):
count -= 1
return count
@property
def num_channels(self):
return self._metadata.getChannelCount(0)
@property
def num_plates(self):
return self._metadata.getPlateCount()
@property
def num_wells(self):
return [self._metadata.getWellCount(i) for i in range(self.num_plates)]
@property
def plate_well_series(self):
if hasattr(self, '_plate_well_series'):
return self._plate_well_series
# FIXME Store slice objects to save resources where possible.
series = [
[
np.array([
self._metadata.getWellSampleIndex(p, w, s).value
for s in range(self._metadata.getWellSampleCount(p, w))
], dtype=int)
for w in range(num_wells)
]
for p, num_wells in enumerate(self.num_wells)
]
        self._plate_well_series = series
        return series
@property
def pixel_size(self):
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPixelsPhysicalSize%s' % dim)
v_units = method(0)
if v_units is None:
warn_data(
"Pixel size undefined; falling back to 1.0 \u03BCm."
)
value = 1.0
else:
value = v_units.value(UNITS.MICROMETER).doubleValue()
values.append(value)
if values[0] != values[1]:
raise Exception("Can't handle non-square pixels (%f, %f)"
% tuple(values))
return values[0]
@property
def pixel_dtype(self):
return self._pixel_dtypes[self._metadata.getPixelsType(0).value]
def plate_name(self, i):
return self._metadata.getPlateName(i)
@property
def well_naming(self):
if not hasattr(self, '_well_naming'):
_well_naming = []
for p in range(self.num_plates):
row_nc = self._metadata.getPlateRowNamingConvention(p)
column_nc = self._metadata.getPlateColumnNamingConvention(p)
if row_nc is not None:
row_nc = row_nc.value
else:
row_nc = 'letter'
if column_nc is not None:
column_nc = column_nc.value
else:
column_nc = 'number'
if row_nc not in ('letter', 'number') or column_nc != 'number':
raise RuntimeError(
"Can't handle well naming convention row={} column={}"
.format(row_nc, column_nc)
)
_well_naming.append([row_nc, column_nc])
self._well_naming = _well_naming
return self._well_naming
def well_name(self, plate, i):
row = self._metadata.getWellRow(plate, i).value
column = self._metadata.getWellColumn(plate, i).value
row_nc, column_nc = self.well_naming[plate]
# FIXME Support formatting with 384/1536-well plates.
assert row_nc in ('letter', 'number')
assert column_nc == 'number'
if row_nc == 'number':
row_fmt = '{:02}'.format(row + 1)
else:
row_fmt = chr(ord('A') + row)
column_fmt = '{:02}'.format(column + 1)
return row_fmt + column_fmt
def tile_position(self, i):
planeCount = self._metadata.getPlaneCount(i)
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPlanePosition%s' % dim)
# FIXME verify all planes have the same X,Y position.
if planeCount > 0:
# Returns None if planePositionX/Y not defined.
v_units = method(i, 0)
else:
# Simple file formats don't have planes at all.
v_units = None
if v_units is None:
warn_data(
"Stage coordinates undefined; falling back to (0, 0)."
)
values = [0.0, 0.0]
break
else:
v = v_units.value(UNITS.MICROMETER)
if v is None:
# Conversion failed, which usually happens when the unit is
# "reference frame". Proceed as if it's actually microns but
# emit a warning.
warn_data(
"Stage coordinates' measurement unit is undefined;"
" assuming \u03BCm."
)
v = v_units.value()
value = v.doubleValue()
values.append(value)
position_microns = np.array(values, dtype=float)
# Invert Y so that stage position coordinates and image pixel
# coordinates are aligned (most formats seem to work this way).
position_microns *= [-1, 1]
position_pixels = position_microns / self.pixel_size
return position_pixels
def tile_size(self, i):
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPixelsSize%s' % dim)
v = method(i).value
values.append(v)
return np.array(values, dtype=int)
class BioformatsReader(PlateReader):
def __init__(self, path, plate=None, well=None):
self.path = path
self.metadata = BioformatsMetadata(self.path)
self.metadata.set_active_plate_well(plate, well)
def read(self, series, c):
self.metadata._reader.setSeries(self.metadata.active_series[series])
index = self.metadata._reader.getIndex(0, c, 0)
byte_array = self.metadata._reader.openBytes(index)
dtype = self.metadata.pixel_dtype
shape = self.metadata.tile_size(series)
img = np.frombuffer(byte_array.tostring(), dtype=dtype).reshape(shape)
return img
class CachingReader(Reader):
"""Wraps a reader to provide tile image caching."""
def __init__(self, reader, channel):
self.reader = reader
self.channel = channel
self._cache = {}
@property
def metadata(self):
return self.reader.metadata
def read(self, series, c):
if c == self.channel and series in self._cache:
img = self._cache[series]
else:
img = self.reader.read(series, c)
if c == self.channel and series not in self._cache:
self._cache[series] = img
return img
# TileStatistics = collections.namedtuple(
# 'TileStatistics',
# 'scan tile x_original y_original x y shift_x shift_y error'
# )
@property
def neighbors_graph(aligner):
"""Return graph of neighboring (overlapping) tiles.
Tiles are considered neighbors if the 'city block' distance between them
is less than the largest tile dimension.
"""
# FIXME: This should properly test for overlap, possibly via
# intersection of bounding rectangles.
if not hasattr(aligner, '_neighbors_graph'):
pdist = scipy.spatial.distance.pdist(aligner.metadata.positions,
metric='cityblock')
sp = scipy.spatial.distance.squareform(pdist)
max_distance = aligner.metadata.size.max() + 1
edges = zip(*np.nonzero((sp > 0) & (sp < max_distance)))
graph = nx.from_edgelist(edges)
graph.add_nodes_from(range(aligner.metadata.num_images))
aligner._neighbors_graph = graph
return aligner._neighbors_graph
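# Sketch of the same city-block neighbor test on synthetic positions: a
# 2x2 grid of 100x100-pixel tiles with 10-pixel overlap (made-up numbers,
# bypassing Metadata). The diagonal pairs exceed max_distance and drop out.
def _demo_neighbor_edges():
    positions = np.array([[0, 0], [0, 90], [90, 0], [90, 90]])
    max_distance = 100 + 1
    pdist = scipy.spatial.distance.pdist(positions, metric='cityblock')
    sp = scipy.spatial.distance.squareform(pdist)
    return list(zip(*np.nonzero((sp > 0) & (sp < max_distance))))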
class EdgeAligner(object):
def __init__(
self, reader, channel=0, max_shift=15, false_positive_ratio=0.01,
randomize=False, filter_sigma=0.0, do_make_thumbnail=True, verbose=False
):
self.channel = channel
self.reader = CachingReader(reader, self.channel)
self.verbose = verbose
# Unit is micrometers.
self.max_shift = max_shift
self.max_shift_pixels = self.max_shift / self.metadata.pixel_size
self.false_positive_ratio = false_positive_ratio
self.randomize = randomize
self.filter_sigma = filter_sigma
self.do_make_thumbnail = do_make_thumbnail
self._cache = {}
neighbors_graph = neighbors_graph
def run(self):
self.make_thumbnail()
self.check_overlaps()
self.compute_threshold()
self.register_all()
self.build_spanning_tree()
self.calculate_positions()
self.fit_model()
def make_thumbnail(self):
if not self.do_make_thumbnail:
return
self.reader.thumbnail = thumbnail.make_thumbnail(
self.reader, channel=self.channel
)
def check_overlaps(self):
# This might be better addressed by removing the +1 from the
# neighbors_graph max_distance calculation and ensuring the graph is
# fully connected.
pos = self.metadata.positions
overlaps = np.array([
self.metadata.size - abs(pos[t1] - pos[t2])
for t1, t2 in self.neighbors_graph.edges
])
failures = np.any(overlaps < 1, axis=1) if len(overlaps) else []
if len(failures) and all(failures):
warn_data("No tiles overlap, attempting alignment anyway.")
elif any(failures):
warn_data("Some neighboring tiles have zero overlap.")
def compute_threshold(self):
        # Compute error threshold for rejecting alignments. We generate a
# distribution of error scores for many known non-overlapping image
# regions and take a certain percentile as the maximum allowable error.
# The percentile becomes our accepted false-positive ratio.
edges = self.neighbors_graph.edges
num_tiles = self.metadata.num_images
# If not enough tiles overlap to matter, skip this whole thing.
if len(edges) <= 1:
self.errors_negative_sampled = np.empty(0)
self.max_error = np.inf
return
widths = np.array([
self.intersection(t1, t2).shape.min()
for t1, t2 in edges
])
w = widths.max()
max_offset = self.metadata.size[0] - w
# Number of possible pairs minus number of actual neighbor pairs.
num_distant_pairs = num_tiles * (num_tiles - 1) // 2 - len(edges)
# Reduce permutation count for small datasets -- there are fewer
# possible truly distinct strips with fewer tiles. The calculation here
# is just a heuristic, not rigorously derived.
n = 1000 if num_distant_pairs > 8 else (num_distant_pairs + 1) * 10
        pairs = np.empty((n, 2), dtype=int)
import os, sys, trimesh, matplotlib.pyplot as pyplot, numpy as np, time, random, progressbar, json
from plyfile import PlyData, PlyElement
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.6f')
from subprocess import call
from collections import deque
from imageio import imread
colors = [[0, 0, 1], [1, 0, 0], [0, 1, 0],
[0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.3, 0.6, 0], [0.6, 0, 0.3], [0.3, 0, 0.6],
[0.6, 0.3, 0], [0.3, 0, 0.6], [0.6, 0, 0.3],
[0.8, 0.2, 0.5]]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
""" Input is NxC, output is num_samplexC
"""
if replace is None: replace = (pc.shape[0] < num_sample)
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
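# Usage sketch on a synthetic cloud: downsample 1000 points to 256 without
# replacement, then upsample 100 points to 256 (replacement is implied
# because the cloud is smaller than num_sample).
def _demo_random_sampling():
    pc = np.random.rand(1000, 3)
    sub = random_sampling(pc, 256)
    up, idx = random_sampling(pc[:100], 256, return_choices=True)
    return sub.shape, up.shape, idx.shape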
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b, :, :]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize, vsize, vsize))
voxel = 2 * radius / float(vsize)
locations = (points + radius) / voxel
locations = locations.astype(int)
vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
return vol
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
    assert (vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a, b, c] == 1:
points.append(np.array([a, b, c]))
if len(points) == 0:
return np.zeros((0, 3))
points = np.vstack(points)
return points
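# Roundtrip sketch: voxelize a synthetic cloud in [-1, 1]^3 into a 12^3
# occupancy grid and recover the occupied cells. Note the recovered points
# are integer voxel indices, not the original coordinates.
def _demo_volume_roundtrip():
    pts = np.random.uniform(-0.99, 0.99, (1024, 3))
    vol = point_cloud_to_volume(pts, vsize=12, radius=1.0)
    occupied = volume_to_point_cloud(vol)
    return vol.shape, occupied.shape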
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b, :, :], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize, vsize, vsize, num_sample, 3))
voxel = 2 * radius / float(vsize)
locations = (points + radius) / voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n, :])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n, :])
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i, j, k) not in loc2pc:
vol[i, j, k, :, :] = np.zeros((num_sample, 3))
else:
pc = loc2pc[(i, j, k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0] > num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0] < num_sample:
pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
# Normalize
pc_center = (np.array([i, j, k]) + 0.5) * voxel - radius
pc = (pc - pc_center) / voxel # shift and scale
vol[i, j, k, :, :] = pc
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b, :, :], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2 * radius / float(imgsize)
locations = (points[:, 0:2] + radius) / pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n, :])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n, :])
for i in range(imgsize):
for j in range(imgsize):
if (i, j) not in loc2pc:
img[i, j, :, :] = np.zeros((num_sample, 3))
else:
pc = loc2pc[(i, j)]
pc = np.vstack(pc)
if pc.shape[0] > num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0] < num_sample:
pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
pc_center = (np.array([i, j]) + 0.5) * pixel - radius
pc[:, 0:2] = (pc[:, 0:2] - pc_center) / pixel
img[i, j, :, :] = pc
return img
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x, y, z in pc])
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_color(points, labels, filename, num_classes=None, colormap=pyplot.cm.jet):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels) + 1
else:
assert (num_classes > np.max(labels))
vertex = []
# colors = [pyplot.cm.jet(i / float(num_classes)) for i in range(num_classes)]
colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
for i in range(N):
c = colors[labels[i]]
c = [int(x * 255) for x in c]
vertex.append((points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
vertex = np.array(vertex,
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=True).write(filename)
return colors
def merge_mesh_with_color(meshes):
face_colors = [mesh.visual.face_colors for mesh in meshes]
vertex_colors = [mesh.visual.vertex_colors for mesh in meshes]
vertice_list = [mesh.vertices for mesh in meshes]
faces_list = [mesh.faces for mesh in meshes]
faces_offset = np.cumsum([v.shape[0] for v in vertice_list])
faces_offset = np.insert(faces_offset, 0, 0)[:-1]
vertices = np.vstack(vertice_list)
faces = np.vstack([face + offset for face, offset in zip(faces_list, faces_offset)])
vertex_colors = np.vstack(vertex_colors)
face_colors = np.vstack(face_colors)
# print(vertex_colors.shape, faces.shape, vertices.shape)
# exit(0)
merged_meshes = trimesh.Trimesh(vertices, faces, face_colors=face_colors, vertex_colors=vertex_colors)
return merged_meshes
def write_ply_bbox_color(vertices, vertex_colors, edges, edge_colors, filename, num_classes=None, colormap=pyplot.cm.jet):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
vertex = []
for i in range(len(vertices)):
vertex.append((vertices[i, 0], vertices[i, 1], vertices[i, 2], vertex_colors[i, 0],
vertex_colors[i, 1], vertex_colors[i, 2]))
vertex = np.array(vertex,
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
edge = []
for i in range(len(edges)):
edge.append((edges[i, 0], edges[i, 1], edge_colors[i, 0], edge_colors[i, 1], edge_colors[i, 2]))
edge = np.array(edge,
dtype=[('vertex1', 'i4'), ('vertex2', 'i4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
e1 = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
e2 = PlyElement.describe(edge, 'edge', comments=['edges'])
PlyData([e1, e2], text=True).write(filename)
def write_bbox_color_json(scene_bbox, label, out_filename, num_classes=None, colormap=pyplot.cm.jet):
labels = label.astype(int)
if num_classes is None:
num_classes = np.max(labels) + 1
else:
assert (num_classes > np.max(labels))
colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
used_color = {}
ret = []
for i, box in enumerate(scene_bbox):
c = colors[label[i]]
c = (np.array(c) * 255).astype(np.uint8)
item_i = [float(box[0]), float(box[1]), float(box[2]), float(box[3]), float(box[4]), float(box[5]),
int(c[0]), int(c[1]), int(c[2])]
used_color[label[i]] = c
#item_i = [str(_) for _ in item_i]
ret.append(item_i)
with open(out_filename, 'w') as f:
json.dump(ret, f)
return used_color
def write_bbox_color(scene_bbox, label, out_filename, num_classes=None, colormap=pyplot.cm.jet, edge=False):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
labels = label.astype(int)
if num_classes is None:
num_classes = np.max(labels) + 1
else:
assert (num_classes > np.max(labels))
def convert_box_to_trimesh_fmt(box, color):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
mesh = trimesh.creation.box(lengths, trns)
color = np.array(color) * 255
face_colors = np.array([color] * mesh.faces.shape[0], np.uint8)
vertex_colors = np.array([color] * mesh.vertices.shape[0], np.uint8)
box_visual = trimesh.visual.create_visual(
vertex_colors=vertex_colors,
face_colors=face_colors,
mesh=mesh)
mesh.visual = box_visual
return mesh
colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
scene = []
ret = []
for i, box in enumerate(scene_bbox):
ret.append(colors[label[i]])
scene.append(convert_box_to_trimesh_fmt(box, colors[label[i]]))
mesh = merge_mesh_with_color(scene)
if edge:
sharp = mesh.face_adjacency_angles > np.radians(40)
edges = mesh.face_adjacency_edges[sharp]
assert edges.shape[0] % 12 == 0
edge_colors = mesh.visual.vertex_colors[edges[:, 0]]
#print(edges.shape, edge_colors.shape)
#exit(0)
write_ply_bbox_color(mesh.vertices, mesh.visual.vertex_colors, edges, edge_colors, out_filename)
else:
trimesh.exchange.export.export_mesh(mesh, out_filename, file_type='ply')
return ret
def write_ply_rgb(points, colors, out_filename, num_classes=None):
""" Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file """
colors = colors.astype(int)
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
c = colors[i, :]
fout.write('v %f %f %f %d %d %d\n' % (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
fout.close()
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:, 0], points[:, 1], points[:, 2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
pyplot.savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def rotate_point_cloud(points, rotation_matrix=None):
""" Input: (n,3), Output: (n,3) """
    # Rotate around Z axis (about the cloud centroid); returns a new array.
if rotation_matrix is None:
rotation_angle = np.random.uniform() * 2 * np.pi
sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1]])
ctr = points.mean(axis=0)
rotated_data = np.dot(points - ctr, rotation_matrix) + ctr
return rotated_data, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
    ''' Input pc is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
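# Sanity sketch: rotating by an angle and then by its negative recovers the
# original cloud up to floating-point error. rotate_pc_along_y modifies its
# argument in place, hence the copy.
def _demo_rotate_roundtrip():
    pc = np.random.rand(128, 3)
    out = rotate_pc_along_y(rotate_pc_along_y(pc.copy(), 0.3), -0.3)
    return np.allclose(out, pc)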
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape) + [3, 3]))
c = np.cos(t)
s = np.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
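# Sanity sketch: any product of these rotation matrices is orthonormal with
# determinant 1 (the angles here are arbitrary).
def _demo_rotation_matrices():
    R = np.dot(roty(0.7), rotz(-1.2))
    return np.allclose(np.dot(R, R.T), np.eye(3)), np.linalg.det(R)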
# ----------------------------------------
# BBox
# ----------------------------------------
def bbox_corner_dist_measure(crnr1, crnr2):
""" compute distance between box corners to replace iou
Args:
crnr1, crnr2: Nx3 points of box corners in camera axis (y points down)
output is a scalar between 0 and 1
"""
dist = sys.maxsize
for y in range(4):
rows = ([(x + y) % 4 for x in range(4)] + [4 + (x + y) % 4 for x in range(4)])
d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0
if d_ < dist:
dist = d_
u = sum([np.linalg.norm(x[0, :] - x[6, :]) for x in [crnr1, crnr2]]) / 2.0
measure = max(1.0 - dist / u, 0)
return measure
def point_cloud_to_bbox(points):
""" Extract the axis aligned box from a pcl or batch of pcls
Args:
points: Nx3 points or BxNx3
output is 6 dim: xyz pos of center and 3 lengths
"""
    which_dim = len(points.shape) - 2  # first dim for a single cloud, second for a batch
mn, mx = points.min(which_dim), points.max(which_dim)
lengths = mx - mn
cntr = 0.5 * (mn + mx)
return np.concatenate([cntr, lengths], axis=which_dim)
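# Sketch: the box of the two opposite corners of the unit cube is centered
# at (0.5, 0.5, 0.5) with lengths (1, 1, 1).
def _demo_point_cloud_to_bbox():
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    return point_cloud_to_bbox(pts)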
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
return
def write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[2, 2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
"""Export oriented (around Y axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Y axis.
Z forward, X rightward, Y downward. heading angle of positive X is 0,
heading angle of negative Z is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[1, 1] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0, :] = np.array([cosval, 0, sinval])
rotmat[2, :] = np.array([-sinval, 0, cosval])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
"""Create lines represented as cylinders connecting pairs of 3D points
Args:
pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
filename: (string) filename for the output mesh (ply) file
rad: radius for the cylinder
res: number of sections used to create the cylinder
"""
scene = trimesh.scene.Scene()
for src, tgt in pcl:
# compute line
vec = tgt - src
M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
vec = tgt - src # compute again since align_vectors modifies vec in-place!
M[:3, 3] = 0.5 * src + 0.5 * tgt
height = np.sqrt(np.dot(vec, vec))
scene.add_geometry(trimesh.creation.cylinder(radius=rad, height=height, sections=res, transform=M))
mesh_list = trimesh.util.concatenate(scene.dump())
trimesh.io.export.export_mesh(mesh_list, '%s.ply' % (filename), file_type='ply')
def normalize_pts(pts):
out = np.array(pts, dtype=np.float32)
center = np.mean(out, axis=0)
out -= center
scale = np.sqrt(np.max(np.sum(out ** 2, axis=1)))
out /= scale
return out
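# Sketch: after normalization the cloud is centered at the origin and the
# farthest point sits on the unit sphere (input numbers are arbitrary).
def _demo_normalize_pts():
    out = normalize_pts(np.random.rand(500, 3) * 10 + 3)
    radius = np.sqrt((out ** 2).sum(axis=1).max())
    return np.allclose(out.mean(axis=0), 0.0, atol=1e-5), radius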
def load_obj(fn, no_normal=False):
fin = open(fn, 'r')
lines = [line.rstrip() for line in fin]
fin.close()
    vertices = []
    normals = []
    faces = []
for line in lines:
if line.startswith('v '):
vertices.append(np.float32(line.split()[1:4]))
elif line.startswith('vn '):
normals.append(np.float32(line.split()[1:4]))
elif line.startswith('f '):
faces.append(np.int32([item.split('/')[0] for item in line.split()[1:4]]))
mesh = dict()
mesh['faces'] = np.vstack(faces)
mesh['vertices'] = np.vstack(vertices)
if (not no_normal) and (len(normals) > 0):
assert len(normals) == len(vertices), 'ERROR: #vertices != #normals'
mesh['normals'] = np.vstack(normals)
return mesh
def export_obj_submesh_label(obj_fn, label_fn):
fin = open(obj_fn, 'r')
lines = [line.rstrip() for line in fin]
fin.close()
    face_ids = []
    cur_id = 0
for line in lines:
if line.startswith('f '):
face_ids.append(cur_id)
elif line.startswith('g '):
cur_id += 1
fout = open(label_fn, 'w')
for i in range(len(face_ids)):
fout.write('%d\n' % face_ids[i])
fout.close()
def load_obj_with_submeshes(fn):
fin = open(fn, 'r')
lines = [line.rstrip() for line in fin]
fin.close()
    vertices = []
    submesh_id = -1
    submesh_names = []
    faces = dict()
for line in lines:
if line.startswith('v '):
vertices.append(np.float32(line.split()[1:4]))
elif line.startswith('f '):
faces[submesh_id].append(np.int32([item.split('/')[0] for item in line.split()[1:4]]))
elif line.startswith('g '):
submesh_names.append(line.split()[1])
submesh_id += 1
faces[submesh_id] = []
vertice_arr = np.vstack(vertices)
mesh = dict()
mesh['names'] = submesh_names
mesh['tot'] = submesh_id + 1
out_vertices = dict()
out_faces = dict()
for i in range(submesh_id + 1):
data = np.vstack(faces[i]).astype(np.int32)
out_vertice_ids = np.array(list(set(data.flatten())), dtype=np.int32) - 1
vertice_map = {out_vertice_ids[x] + 1: x + 1 for x in range(len(out_vertice_ids))}
out_vertices[i] = vertice_arr[out_vertice_ids, :]
data = np.vstack(faces[i])
cur_out_faces = np.zeros(data.shape, dtype=np.float32)
for x in range(data.shape[0]):
for y in range(data.shape[1]):
cur_out_faces[x, y] = vertice_map[data[x, y]]
out_faces[i] = cur_out_faces
mesh['vertices'] = out_vertices
mesh['faces'] = out_faces
return mesh
def load_off(fn):
fin = open(fn, 'r')
line = fin.readline()
line = fin.readline()
num_vertices = int(line.split()[0])
num_faces = int(line.split()[1])
vertices = np.zeros((num_vertices, 3)).astype(np.float32)
for i in range(num_vertices):
vertices[i, :] = np.float32(fin.readline().split())
faces = np.zeros((num_faces, 3)).astype(np.int32)
for i in range(num_faces):
faces[i, :] = np.int32(fin.readline().split()[1:]) + 1
fin.close()
mesh = dict()
mesh['faces'] = faces
mesh['vertices'] = vertices
return mesh
def rotate_pts(pts, theta=0, phi=0):
rotated_data = np.zeros(pts.shape, dtype=np.float32)
# rotate along y-z axis
rotation_angle = phi / 90 * np.pi / 2
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[1, 0, 0],
[0, cosval, sinval],
[0, -sinval, cosval]])
rotated_pts = np.dot(pts, rotation_matrix)
# rotate along x-z axis
rotation_angle = theta / 360 * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
rotated_pts = np.dot(rotated_pts, rotation_matrix)
return rotated_pts
def load_pts(fn):
with open(fn, 'r') as fin:
lines = [item.rstrip() for item in fin]
pts = np.array([[float(line.split()[0]), float(line.split()[1]), float(line.split()[2])] for line in lines],
dtype=np.float32)
return pts
def load_pts_nor(fn):
with open(fn, 'r') as fin:
lines = [item.rstrip() for item in fin]
pts = np.array([[float(line.split()[0]), float(line.split()[1]), float(line.split()[2])] for line in lines],
dtype=np.float32)
nor = np.array([[float(line.split()[3]), float(line.split()[4]), float(line.split()[5])] for line in lines],
dtype=np.float32)
return pts, nor
def load_label(fn):
with open(fn, 'r') as fin:
lines = [item.rstrip() for item in fin]
label = np.array([int(line) for line in lines], dtype=np.int32)
return label
def export_obj(out, v, f):
with open(out, 'w') as fout:
for i in range(v.shape[0]):
fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
for i in range(f.shape[0]):
fout.write('f %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
def export_label(out, label):
with open(out, 'w') as fout:
for i in range(label.shape[0]):
fout.write('%d\n' % label[i])
def export_pts(out, v):
with open(out, 'w') as fout:
for i in range(v.shape[0]):
fout.write('%f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
def export_pts_with_normal(out, v, n):
    assert v.shape[0] == n.shape[0], 'v.shape[0] != n.shape[0]'
with open(out, 'w') as fout:
for i in range(v.shape[0]):
fout.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], n[i, 0], n[i, 1], n[i, 2]))
def export_ply(out, v):
with open(out, 'w') as fout:
        fout.write('ply\n')
        fout.write('format ascii 1.0\n')
        fout.write('element vertex ' + str(v.shape[0]) + '\n')
        fout.write('property float x\n')
        fout.write('property float y\n')
        fout.write('property float z\n')
        fout.write('end_header\n')
for i in range(v.shape[0]):
fout.write('%f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
def export_ply_with_label(out, v, l):
num_colors = len(colors)
with open(out, 'w') as fout:
        fout.write('ply\n')
        fout.write('format ascii 1.0\n')
        fout.write('element vertex ' + str(v.shape[0]) + '\n')
        fout.write('property float x\n')
        fout.write('property float y\n')
        fout.write('property float z\n')
        fout.write('property uchar red\n')
        fout.write('property uchar green\n')
        fout.write('property uchar blue\n')
        fout.write('end_header\n')
for i in range(v.shape[0]):
cur_color = colors[l[i] % num_colors]
fout.write('%f %f %f %d %d %d\n' % (v[i, 0], v[i, 1], v[i, 2], \
int(cur_color[0] * 255), int(cur_color[1] * 255),
int(cur_color[2] * 255)))
def export_ply_with_normal(out, v, n):
    assert v.shape[0] == n.shape[0], 'v.shape[0] != n.shape[0]'
with open(out, 'w') as fout:
        fout.write('ply\n')
        fout.write('format ascii 1.0\n')
        fout.write('element vertex ' + str(v.shape[0]) + '\n')
        fout.write('property float x\n')
        fout.write('property float y\n')
        fout.write('property float z\n')
        fout.write('property float nx\n')
        fout.write('property float ny\n')
        fout.write('property float nz\n')
        fout.write('end_header\n')
for i in range(v.shape[0]):
fout.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], n[i, 0], n[i, 1], n[i, 2]))
def sample_points_from_obj(label_fn, obj_fn, pts_fn, num_points, verbose=False):
    cmd = 'MeshSample -n%d -s3 -l %s %s %s > /dev/null' % (num_points, label_fn, obj_fn, pts_fn)
if verbose: print(cmd)
call(cmd, shell=True)
with open(pts_fn, 'r') as fin:
lines = [line.rstrip() for line in fin]
pts = np.array([[line.split()[0], line.split()[1], line.split()[2]] for line in lines], dtype=np.float32)
label = np.array([int(line.split()[-1].split('"')[1]) for line in lines], dtype=np.int32)
if verbose: print('get pts: ', pts.shape)
return pts, label
def sample_points(v, f, label=None, num_points=200, verbose=False):
tmp_obj = str(time.time()).replace('.', '_') + '_' + str(random.random()).replace('.', '_') + '.obj'
tmp_pts = tmp_obj.replace('.obj', '.pts')
tmp_label = tmp_obj.replace('.obj', '.label')
if label is None:
label = np.zeros((f.shape[0]), dtype=np.int32)
export_obj(tmp_obj, v, f)
export_label(tmp_label, label)
pts, fid = sample_points_from_obj(tmp_label, tmp_obj, tmp_pts, num_points=num_points, verbose=verbose)
cmd = 'rm -rf %s %s %s' % (tmp_obj, tmp_pts, tmp_label)
call(cmd, shell=True)
return pts, fid
def export_pts_with_color(out, pc, label):
num_point = pc.shape[0]
with open(out, 'w') as fout:
for i in range(num_point):
cur_color = label[i]
fout.write('%f %f %f %d %d %d\n' % (pc[i, 0], pc[i, 1], pc[i, 2], cur_color[0], cur_color[1], cur_color[2]))
def export_pts_with_label(out, pc, label, base=0):
num_point = pc.shape[0]
num_colors = len(colors)
with open(out, 'w') as fout:
for i in range(num_point):
cur_color = colors[label[i] % num_colors]
fout.write('%f %f %f %f %f %f\n' % (pc[i, 0], pc[i, 1], pc[i, 2], cur_color[0], cur_color[1], cur_color[2]))
def export_pts_with_keypoints(out, pc, kp_list):
num_point = pc.shape[0]
with open(out, 'w') as fout:
for i in range(num_point):
if i in kp_list:
color = [1.0, 0.0, 0.0]
else:
color = [0.0, 0.0, 1.0]
fout.write('%f %f %f %f %f %f\n' % (pc[i, 0], pc[i, 1], pc[i, 2], color[0], color[1], color[2]))
def compute_boundary_labels(pc, seg, radius=0.05):
num_points = len(seg)
assert num_points == pc.shape[0]
assert pc.shape[1] == 3
bdr = np.zeros((num_points)).astype(np.int32)
square_sum = np.sum(pc * pc, axis=1)
A = np.tile(np.expand_dims(square_sum, axis=0), [num_points, 1])
B = np.tile(np.expand_dims(square_sum, axis=1), [1, num_points])
C = np.dot(pc, pc.T)
dist = A + B - 2 * C
for i in range(num_points):
neighbor_seg = seg[dist[i, :] < radius ** 2]
if len(set(neighbor_seg)) > 1:
bdr[i] = 1
return bdr
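# Sketch: two segments meeting at x = 0 on a line of points; only points
# within `radius` of the interface are flagged as boundary points.
def _demo_boundary_labels():
    x = np.linspace(-1, 1, 21)
    pc = np.stack([x, np.zeros_like(x), np.zeros_like(x)], axis=1)
    seg = (x > 0).astype(np.int32)
    return compute_boundary_labels(pc, seg, radius=0.15)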
def render_obj(out, v, f, delete_img=False, flat_shading=True):
tmp_obj = out.replace('.png', '.obj')
export_obj(tmp_obj, v, f)
if flat_shading:
cmd = 'RenderShape -0 %s %s 600 600 > /dev/null' % (tmp_obj, out)
else:
cmd = 'RenderShape %s %s 600 600 > /dev/null' % (tmp_obj, out)
call(cmd, shell=True)
img = np.array(imread(out), dtype=np.float32)
cmd = 'rm -rf %s' % (tmp_obj)
call(cmd, shell=True)
if delete_img:
cmd = 'rm -rf %s' % out
call(cmd, shell=True)
return img
def render_obj_with_label(out, v, f, label, delete_img=False, base=0):
tmp_obj = out.replace('.png', '.obj')
tmp_label = out.replace('.png', '.label')
label += base
export_obj(tmp_obj, v, f)
export_label(tmp_label, label)
cmd = 'RenderShape %s -l %s %s 600 600 > /dev/null' % (tmp_obj, tmp_label, out)
call(cmd, shell=True)
img = np.array(imread(out), dtype=np.float32)
cmd = 'rm -rf %s %s' % (tmp_obj, tmp_label)
call(cmd, shell=True)
if delete_img:
cmd = 'rm -rf %s' % out
call(cmd, shell=True)
return img
def render_pts_with_label(out, pts, label, delete_img=False, base=0, point_size=6):
tmp_pts = out.replace('.png', '.pts')
tmp_label = out.replace('.png', '.label')
label += base
export_pts(tmp_pts, pts)
export_label(tmp_label, label)
cmd = 'RenderShape %s -l %s %s 600 600 -p %d > /dev/null' % (tmp_pts, tmp_label, out, point_size)
call(cmd, shell=True)
img = np.array(imread(out), dtype=np.float32)
cmd = 'rm -rf %s %s' % (tmp_pts, tmp_label)
call(cmd, shell=True)
if delete_img:
cmd = 'rm -rf %s' % out
call(cmd, shell=True)
return img
def render_pts(out, pts, delete_img=False, point_size=6, point_color='FF0000FF'):
tmp_pts = out.replace('.png', '.pts')
export_pts(tmp_pts, pts)
cmd = 'RenderShape %s %s 600 600 -p %d -c %s > /dev/null' % (tmp_pts, out, point_size, point_color)
call(cmd, shell=True)
img = np.array(imread(out), dtype=np.float32)
cmd = 'rm -rf %s' % tmp_pts
call(cmd, shell=True)
if delete_img:
cmd = 'rm -rf %s' % out
call(cmd, shell=True)
return img
def render_pts_with_keypoints(out, pts, kp_list, delete_img=False, \
point_size=6, fancy_kp=False, fancy_kp_num=20, fancy_kp_radius=0.02):
tmp_pts = out.replace('.png', '.pts')
tmp_label = out.replace('.png', '.label')
num_point = pts.shape[0]
labels = np.ones((num_point), dtype=np.int32) * 14
for idx in kp_list:
labels[idx] = 13
if fancy_kp:
num_kp = len(kp_list)
more_pts = np.zeros((num_kp * fancy_kp_num, 3), dtype=np.float32)
more_labels = np.ones((num_kp * fancy_kp_num), dtype=np.int32) * 13
for i, idx in enumerate(kp_list):
for j in range(fancy_kp_num):
x = np.random.randn()
y = np.random.randn()
z = np.random.randn()
l = np.sqrt(x ** 2 + y ** 2 + z ** 2)
x = x / l * fancy_kp_radius + pts[idx, 0]
y = y / l * fancy_kp_radius + pts[idx, 1]
z = z / l * fancy_kp_radius + pts[idx, 2]
more_pts[i * fancy_kp_num + j, 0] = x
more_pts[i * fancy_kp_num + j, 1] = y
more_pts[i * fancy_kp_num + j, 2] = z
pts = np.concatenate((pts, more_pts), axis=0)
labels = np.concatenate((labels, more_labels), axis=0)
export_pts(tmp_pts, pts)
export_label(tmp_label, labels)
cmd = 'RenderShape %s -l %s %s 600 600 -p %d > /dev/null' % (tmp_pts, tmp_label, out, point_size)
call(cmd, shell=True)
img = np.array(imread(out), dtype=np.float32)
cmd = 'rm -rf %s %s' % (tmp_pts, tmp_label)
call(cmd, shell=True)
if delete_img:
cmd = 'rm -rf %s' % out
call(cmd, shell=True)
return img
def compute_normal(pts, neighbor=50):
l = pts.shape[0]
assert (l > neighbor)
t = np.sum(pts ** 2, axis=1)
A = np.tile(t, (l, 1))
C = np.array(A).T
B = np.dot(pts, pts.T)
dist = A - 2 * B + C
neigh_ids = dist.argsort(axis=1)[:, :neighbor]
vec_ones = np.ones((neighbor, 1)).astype(np.float32)
normals = np.zeros((l, 3)).astype(np.float32)
for idx in range(l):
D = pts[neigh_ids[idx, :], :]
cur_normal = np.dot(np.linalg.pinv(D), vec_ones)
cur_normal = np.squeeze(cur_normal)
len_normal = np.sqrt(np.sum(cur_normal ** 2))
normals[idx, :] = cur_normal / len_normal
if np.dot(normals[idx, :], pts[idx, :]) < 0:
normals[idx, :] = -normals[idx, :]
return normals
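# Note on compute_normal: for each point it fits the plane a*x + b*y + c*z = 1
# through its `neighbor` nearest points by solving D n ~= 1 with the
# pseudo-inverse; n is normal to that plane, then unit-normalized and flipped
# to point away from the origin. Quick sanity check (synthetic data, an
# illustration only):
#
#   xy = np.random.rand(200, 2)
#   plane = np.concatenate([xy, np.ones((200, 1))], axis=1)  # points at z = 1
#   n = compute_normal(plane)  # every row should be ~(0, 0, 1)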
def transfer_label_from_pts_to_obj(vertices, faces, pts, label):
assert pts.shape[0] == label.shape[0], 'ERROR: #pts != #label'
num_pts = pts.shape[0]
num_faces = faces.shape[0]
face_centers = []
for i in range(num_faces):
face_centers.append(
(vertices[faces[i, 0] - 1, :] + vertices[faces[i, 1] - 1, :] + vertices[faces[i, 2] - 1, :]) / 3)
face_center_array = np.vstack(face_centers)
A = np.tile(np.expand_dims(np.sum(face_center_array ** 2, axis=1), axis=0), [num_pts, 1])
B = np.tile(np.expand_dims(np.sum(pts ** 2, axis=1), axis=1), [1, num_faces])
    C = np.dot(pts, face_center_array.T)
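    # (The snippet ends here in the source. A plausible completion, mirroring
    #  the distance matrix in compute_boundary_labels above, would be:
    #    dist = A + B - 2 * C
    #    return label[np.argmin(dist, axis=0)]  # label of nearest point per face
    #  -- stated as an assumption, not the original code.)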
import os.path
import glob
import datetime
import numpy as np
import netCDF4 as nc4
class PInterpError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
        return self.msg
class NameList:
fields = ['PRES','TT','GHT','RH']
met_em_fields = [
dict(name = 'LANDUSEF', unit='category', desc = '24-category USGS landuse', order = 'XYZ'),
dict(name = 'SOILCTOP', unit='category', desc = '16-category top-layer soil type', order = 'XYZ'),
dict(name = 'SOIL_LAYERS', unit='cm', desc = '', order = 'XYZ'),
dict(name = 'SOILHGT', unit='m', desc = 'Terrain field of source analysis', order = 'XY'),
dict(name = 'PMSL', unit='Pa', desc = 'Sea-level Pressure', order = 'XY')]
def __init__(self, nldict):
self.__dict__ = nldict
self.canonicalize()
def canonicalize(self):
for k, d in self.__dict__.items():
if isinstance(d, str):
self.__dict__[k] = d.strip()
if self.process not in ['all', 'list']:
            raise PInterpError('Invalid setting for process')
for f in self.fields:
if not f in NameList.fields:
                raise PInterpError('Invalid setting for fields')
if self.met_em_output: # line 324
if self.process != 'all':
raise PInterpError('Process must be "all" if met_em_output==True')
if self.interp_levels[0] < 950:
raise PInterpError('''
ERROR: Lowest pressure level set in interp_levels in the
nldict is above 950 mb. met_em needs surface data.
Include a pressure level close to the surface: 1000 mb
and other mandatory levels: 925, 850, 700, 500, 400, 300, 250, 200, 150, 100''') # line 334
self.extrapolate = True
self.split_output = True
self.unstagger_grid = False
surface = self.interp_levels[0]+5
            self.interp_levels = [surface] + self.interp_levels
self.interp_levels = np.array(self.interp_levels)
self.path_to_input = self.path_to_input.rstrip(os.sep)
self.path_to_output = self.path_to_output.rstrip(os.sep)
if self.process == 'all':
self.fields = NameList.fields
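# Example namelist (illustrative values only; the exact set of required keys
# depends on how the rest of the module consumes the object):
#
#   nl = NameList(dict(process='all', fields=['PRES', 'TT'],
#                      met_em_output=False, interp_levels=[1000., 925., 850.],
#                      path_to_input='.', path_to_output='.',
#                      input_name='wrfout_d01_*'))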
class Interpolator:
def __init__(self, namelist):
self.namelist = NameList(namelist)
self.filenames = glob.glob(os.path.join(self.namelist.path_to_input, self.namelist.input_name))
if not self.filenames: # line 345
raise PInterpError('No input file specified')
self.collect_mefields() # add extra fields for met_em_output
# output summary if required
# self.pressures = self.namelist.interp_levels * 100
for ifilename in self.filenames:
print(ifilename)
self.interp_file(ifilename)
def collect_mefields(self):
if self.namelist.process == 'all' and self.namelist.met_em_output:
ncfile = nc4.Dataset(self.filenames[0])
            mes = [me for me in NameList.met_em_fields if me['name'] not in ncfile.variables]
            self.namelist.fields.extend(me['name'] for me in mes)
ncfile.close()
def interp_file(self, filename):
ifilename = os.path.basename(filename)
# num_metgrid_levels = self.namelist.interp_levels.size
incfile = nc4.Dataset(filename)
dimensions, variables, gattributes = incfile.dimensions, incfile.variables, incfile.__dict__ # .copy()
self.debug('input file has {} dimensions, {} variables, and {} global attributes'
.format(len(dimensions), len(variables), len(gattributes)))
diag_processed = False
met_em_processed = False
timevar = incfile.variables['Times']
        timestrs = np.empty(timevar.shape, dtype='U20')
import torch
import os
import time
import numpy as np
from torch.optim.lr_scheduler import LambdaLR
from torchtext.legacy.data import Iterator
from dataloader import load_data
from model import CharCNN
from inference import test
PAD_TOKEN = '<pad>'
# train model
def train(**kwargs):
training_data, validation_data = load_data(**kwargs)
n_classes = len(training_data.fields['lang'].vocab)
char_vocab_size = len(training_data.fields['chars'].vocab)
padding_idx = training_data.fields['chars'].vocab.stoi[PAD_TOKEN]
print(n_classes, char_vocab_size, padding_idx)
gpu = True if torch.cuda.is_available() and kwargs['use_cuda'] else False
device = torch.device(type='cuda') if gpu else torch.device(type='cpu')
training_iterator = Iterator(training_data, kwargs['batch_size'], train=True,
sort_within_batch=True, device=device, repeat=False)
validation_iterator = Iterator(validation_data, kwargs['batch_size'], train=False, sort_within_batch=True,
device=device, repeat=False)
# our model
model = CharCNN(char_vocab_size, padding_idx, emb_dim=kwargs['emb_dim'],
dropout_p=kwargs['dropout'], n_classes=n_classes, max_seq_length=kwargs['max_chars'])
    model.to(device)  # respects the `gpu` flag instead of unconditionally calling .cuda()
# optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=kwargs['learning_rate'], momentum=0.9)
scheduler = LambdaLR(optimizer, lr_lambda=lambda t: 0.8 ** (t / 3))
num_iter_per_epoch = len(training_iterator)
best_accuracy = 0
batch_accuracies, epoch_accuracies = [], []
if kwargs['output_dir'] is None:
output_dir = os.path.join(
"./results",
f"lid_model_{time.strftime('%Y%m%d_%H%M%S')}",
)
os.makedirs(output_dir)
output_file = open(os.path.join(output_dir, "logs.txt"), "w")
model.train()
# training loop
for epoch in range(kwargs['num_epochs']):
losses = []
scheduler.step() # changed since v1.1.0
for iter, batch in enumerate(training_iterator):
# train the model; basically telling on what to train
optimizer.zero_grad()
# get the inputs
if kwargs['level'] == 'char':
sequence = batch.chars[0]
lengths = batch.chars[1]
target = batch.lang
pad_idx = training_iterator.dataset.fields['chars'].vocab.stoi[PAD_TOKEN]
criterion = torch.nn.CrossEntropyLoss(ignore_index=pad_idx)
else:
sequence = batch.paragraph[0]
lengths = batch.paragraph[1]
char_lengths = batch.paragraph[2]
target = batch.lang
pad_idx = training_iterator.dataset.fields['paragraph'].vocab.stoi[PAD_TOKEN]
criterion = torch.nn.CrossEntropyLoss(ignore_index=pad_idx)
batch_size = sequence.shape[0]
# forward pass: compute predicted y by passing x to the model
predictions = model.forward(sequence)
# compute loss
loss = criterion(predictions, target.squeeze(1))
losses.append(loss.item())
_, predicted_languages = torch.topk(predictions, 1)
# print(predicted_languages)
# acuracy calculation
batch_accuracy = target.eq(predicted_languages).sum().item()/batch_size
batch_accuracies.append(batch_accuracy)
epoch_accuracies.append(batch_accuracy)
# compute gradients
loss.backward()
optimizer.step()
print("Training: Iteration: {}/{} Epoch: {}/{} Loss: {}"
" Accuracy: {}, Learning rate: {}".format(iter + 1,
num_iter_per_epoch,
epoch + 1, kwargs['num_epochs'],
round(loss.item(),4),
round(batch_accuracies[-1], 3),
round(scheduler.get_last_lr()[0], 5)
))
# evaluation of validation data
        train_accuracy = np.array(batch_accuracies)
#import matplotlib
#matplotlib.use('TkAgg')
from numpy import arange, sin, pi
import sys
sys.path.append("../")
from appJar import gui
def press(btn):
if btn == "Update":
t = arange(0.2, 20.0, 0.1)
s = sin(int(app.getEntry("space"))*pi*t)
app.updatePlot("p1", t, s)
else:
fig = app.getPlotWidget("p1").fig
print("aaa", canvas, figure)
ax = figure.add_subplot(1,1,1)
ax.plot([1, 2, 3, 4])
ax.set_ylabel('example')
        t = arange(0.0, 3.0, 0.01)
import copy
import pdb
import numpy as np
from scipy import signal
from sklearn.preprocessing import normalize
from wfdb.processing.basic import get_filter_gain
from wfdb.processing.peaks import find_local_peaks
from wfdb.io.record import Record
class XQRS(object):
"""
The QRS detector class for the XQRS algorithm. The `XQRS.Conf`
class is the configuration class that stores initial parameters
for the detection. The `XQRS.detect` method runs the detection algorithm.
The process works as follows:
- Load the signal and configuration parameters.
- Bandpass filter the signal between 5 and 20 Hz, to get the
filtered signal.
- Apply moving wave integration (MWI) with a Ricker
(Mexican hat) wavelet onto the filtered signal, and save the
square of the integrated signal.
- Conduct learning if specified, to initialize running
parameters of noise and QRS amplitudes, the QRS detection
threshold, and recent R-R intervals. If learning is unspecified
or fails, use default parameters. See the docstring for the
`_learn_init_params` method of this class for details.
- Run the main detection. Iterate through the local maxima of
the MWI signal. For each local maxima:
- Check if it is a QRS complex. To be classified as a QRS,
it must come after the refractory period, cross the QRS
detection threshold, and not be classified as a T-wave
if it comes close enough to the previous QRS. If
successfully classified, update running detection
threshold and heart rate parameters.
- If not a QRS, classify it as a noise peak and update
running parameters.
- Before continuing to the next local maxima, if no QRS
was detected within 1.66 times the recent R-R interval,
perform backsearch QRS detection. This checks previous
peaks using a lower QRS detection threshold.
Attributes
----------
sig : 1d ndarray
The input ECG signal to apply the QRS detection on.
fs : int, float
The sampling frequency of the input signal.
conf : XQRS.Conf object, optional
The configuration object specifying signal configuration
parameters. See the docstring of the XQRS.Conf class.
Examples
--------
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
>>> xqrs.detect()
>>> wfdb.plot_items(signal=sig, ann_samp=[xqrs.qrs_inds])
"""
def __init__(self, sig, fs, conf=None):
if sig.ndim != 1:
raise ValueError('sig must be a 1d numpy array')
self.sig = sig
self.fs = fs
self.sig_len = len(sig)
self.conf = conf or XQRS.Conf()
self._set_conf()
class Conf(object):
"""
Initial signal configuration object for this QRS detector.
Attributes
----------
hr_init : int, float, optional
Initial heart rate in beats per minute. Used for calculating
recent R-R intervals.
hr_max : int, float, optional
Hard maximum heart rate between two beats, in beats per
minute. Used for refractory period.
hr_min : int, float, optional
Hard minimum heart rate between two beats, in beats per
minute. Used for calculating recent R-R intervals.
qrs_width : int, float, optional
Expected QRS width in seconds. Used for filter widths
indirect refractory period.
qrs_thr_init : int, float, optional
Initial QRS detection threshold in mV. Use when learning
is False, or learning fails.
qrs_thr_min : int, float, string, optional
Hard minimum detection threshold of QRS wave. Leave as 0
for no minimum.
ref_period : int, float, optional
The QRS refractory period.
t_inspect_period : int, float, optional
The period below which a potential QRS complex is
inspected to see if it is a T-wave.
"""
def __init__(self, hr_init=75, hr_max=200, hr_min=25, qrs_width=0.1,
qrs_thr_init=0.13, qrs_thr_min=0, ref_period=0.2,
t_inspect_period=0.36):
if hr_min < 0:
raise ValueError("'hr_min' must be >= 0")
if not hr_min < hr_init < hr_max:
raise ValueError("'hr_min' < 'hr_init' < 'hr_max' must be True")
if qrs_thr_init < qrs_thr_min:
raise ValueError("qrs_thr_min must be <= qrs_thr_init")
self.hr_init = hr_init
self.hr_max = hr_max
self.hr_min = hr_min
self.qrs_width = qrs_width
self.qrs_radius = self.qrs_width / 2
self.qrs_thr_init = qrs_thr_init
self.qrs_thr_min = qrs_thr_min
self.ref_period = ref_period
self.t_inspect_period = t_inspect_period
def _set_conf(self):
"""
Set configuration parameters from the Conf object into the detector
object. Time values are converted to samples, and amplitude values
are in mV.
Parameters
----------
N/A
Returns
-------
N/A
"""
self.rr_init = 60 * self.fs / self.conf.hr_init
self.rr_max = 60 * self.fs / self.conf.hr_min
self.rr_min = 60 * self.fs / self.conf.hr_max
# Note: if qrs_width is odd, qrs_width == qrs_radius*2 + 1
self.qrs_width = int(self.conf.qrs_width * self.fs)
self.qrs_radius = int(self.conf.qrs_radius * self.fs)
self.qrs_thr_init = self.conf.qrs_thr_init
self.qrs_thr_min = self.conf.qrs_thr_min
self.ref_period = int(self.conf.ref_period * self.fs)
self.t_inspect_period = int(self.conf.t_inspect_period * self.fs)
def _bandpass(self, fc_low=5, fc_high=20):
"""
Apply a bandpass filter onto the signal, and save the filtered
signal.
Parameters
----------
fc_low : int, float
The low frequency cutoff for the filter.
fc_high : int, float
The high frequency cutoff for the filter.
Returns
-------
N/A
"""
self.fc_low = fc_low
self.fc_high = fc_high
b, a = signal.butter(2, [float(fc_low) * 2 / self.fs,
float(fc_high) * 2 / self.fs], 'pass')
self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto],
axis=0)
# Save the passband gain (x2 due to double filtering)
self.filter_gain = get_filter_gain(b, a, np.mean([fc_low, fc_high]),
self.fs) * 2
def _mwi(self):
"""
Apply moving wave integration (MWI) with a Ricker (Mexican hat)
wavelet onto the filtered signal, and save the square of the
integrated signal. The width of the hat is equal to the QRS width.
After integration, find all local peaks in the MWI signal.
Parameters
----------
N/A
Returns
-------
N/A
"""
wavelet_filter = signal.ricker(self.qrs_width, 4)
self.sig_i = signal.filtfilt(wavelet_filter, [1], self.sig_f,
axis=0) ** 2
# Save the MWI gain (x2 due to double filtering) and the total
# gain from raw to MWI
        self.mwi_gain = get_filter_gain(wavelet_filter, [1],
                                        np.mean([self.fc_low, self.fc_high]),
                                        self.fs) * 2
import os
import numpy as np
import torch
from utils import MODEL_FILE_LIST, MAX_PER_DICT
perDir = 'perRes'
if not os.path.isdir(perDir):
os.mkdir(perDir)
all_res = []
for attack_name in ['L2', 'Linf']:
for model_file in MODEL_FILE_LIST:
task_name = model_file.split('.')[0]
avg_res = np.zeros([3, 7])
max_res = np.zeros([3, 7])
delta_list = []
for attack_type in range(7):
adv_file = 'adv/' + str(attack_type) + '_' + attack_name + '_' + task_name + '.adv'
res = torch.load(adv_file)
ori_img, adv_img = [], []
for data in res:
ori_img.extend(data[0][0])
adv_img.extend(data[1][0])
ori_img = torch.stack(ori_img)
adv_img = torch.stack(adv_img)
delta = adv_img - ori_img
delta = delta.reshape([len(delta), -1])
if attack_name == 'L2':
delta = torch.norm(delta, p=2, dim=1).unsqueeze(1)
else:
delta = torch.norm(delta, p=np.inf, dim=1).unsqueeze(1)
delta = delta.cpu().numpy().reshape([-1, 1])
delta_list.append(delta)
delta_list = np.concatenate(delta_list, axis=1)
final_res = np.concatenate([delta_list[:, 1:], delta_list[:, 0:1]], axis=1)
file_name = os.path.join(perDir, task_name + '_' + attack_name + '.csv')
np.savetxt(file_name, final_res, delimiter=',')
print(file_name, 'success')
all_res.append((final_res < MAX_PER_DICT[attack_name]).mean(0).reshape([-1, 1]))
print()
all_res = np.concatenate(all_res, axis=1)
import numpy as np
from pyNastran.femutils.utils import unique2d
from pyNastran.dev.bdf_vectorized.utils import slice_to_iter
from pyNastran.dev.bdf_vectorized.cards.elements.solid.ctetra4 import volume4
from pyNastran.dev.bdf_vectorized.cards.elements.solid.chexa8 import quad_area_centroid
from pyNastran.dev.bdf_vectorized.cards.elements.solid.cpenta6 import tri_area_centroid
from pyNastran.dev.bdf_vectorized.cards.elements.shell.cquad4 import _cquad4_normal_A
from pyNastran.dev.bdf_vectorized.cards.elements.shell.ctria3 import _ctria3_normal_A
from pyNastran.dev.bdf_vectorized.cards.elements.utils import build_groups, asarray
from pyNastran.dev.bdf_vectorized.cards.vectorized_card import BaseMethods
class Elements(BaseMethods):
def __init__(self, model):
"""
Defines the Elements object.
Parameters
----------
model : BDF
the BDF object
"""
self.model = model
self.n = 0
self.nelements = 0
self.nproperties = 0
self.element_ids = None
self.property_ids = None
self.element_groups = None
self.property_groups = None
#: stores PSHELL, PCOMP, PCOMPG
self.properties_shell = model.properties_shell
#: stores CTRIA3, CTRIA6, CQUAD4, CQUAD8
self.elements_shell = model.elements_shell
# shear
#: stores CSHEAR
self.cshear = model.cshear
#: stores PSHEAR
self.pshear = model.pshear
# rigid
#self.rbe2 = model.rbe2
#self.rbe3 = model.rbe3
# spring
self.elements_spring = model.elements_spring
self.pelas = model.pelas
# bushings
self.cbush = model.cbush
self.cbush1d = model.cbush1d
self.cbush2d = model.cbush2d
self.pbush = model.pbush
# rods
self.conrod = model.conrod
self.prod = model.prod
self.crod = model.crod
self.ctube = model.ctube
self.ptube = model.ptube
# mass
#: stores CONM1, CONM2, CMASS1, CMASS2, CMASS3, CMASS4, CMASS5, PMASS
self.mass = model.mass
#self.conm1 = model.conm1
#self.conm2 = model.conm2
#self.cmass1 = self.cmass1
#self.cmass1 = self.cmass1
#self.cmass2 = self.cmass2
#self.cmass3 = self.cmass3
#self.cmass4 = self.cmass4
#self.cmass5 = self.cmass5
# bars
#: stores CBAR
self.cbar = model.cbar
#: stores PBAR, PBARL
self.properties_bar = model.properties_bar
# beams
#: stores CBEAM
self.cbeam = model.cbeam
#: stores PBEAM, PBEAML
self.properties_beam = model.properties_beam
# solids
#: stores CTETRA4, CPENTA6, CHEXA8, CTETRA10, CPENTA15, CHEXA20
self.elements_solid = model.elements_solid
#: stores PSOLID, PLSOLID
self.properties_solid = model.properties_solid
def validate_nodes(self, elements):
validate_nodes = False
if not hasattr(elements, 'node_ids'):
# this element isn't finished
return
if not validate_nodes:
# no checks
return
grids = self.model.grid.node_id
nids = np.unique(np.ravel(elements.node_ids))
#nids.sort()
diff = np.setdiff1d(nids, grids)
if len(diff):
eids = []
# find the bad elements
for i, eid in enumerate(elements.element_id):
j = np.intersect1d(diff, elements.node_ids[i, :])
if len(j):
eids.append(eid)
# prevents really long arrays
eids = np.array(eids)
msg = "Couldn't find Node ID: %s, which is required by %s %s" % (
diff, elements.type, eids)
raise RuntimeError(msg)
def build(self):
#print('elements')
self.n = 0
self._build_elements()
self._build_properties()
old_build = False
if old_build:
etypes = self._get_element_types(nlimit=False)
ptypes = self._get_property_types(nlimit=False)
for elems in etypes:
#if elems is None:
#continue
if hasattr(elems, 'type'):
if elems.type in self.model.card_count:
self.model.log.debug('building %s' % elems.__class__.__name__)
else:
#if elems.n:
self.model.log.debug('building %s' % elems.__class__.__name__)
elems.build()
self.nelements += elems.n
self.validate_nodes(elems)
#print(nids - grids[i])
for props in ptypes:
#if props is None:
#continue
if hasattr(props, 'type'):
if props.type in self.model.card_count:
self.model.log.debug('building %s' % props.__class__.__name__)
else:
#if props.n:
self.model.log.debug('building %s' % props.__class__.__name__)
props.build()
self.nproperties += props.n
else:
etypes = self._get_element_types(nlimit=True)
ptypes = self._get_property_types(nlimit=True)
self.model.log.debug('etypes = %s' % etypes)
self.model.log.debug('ptypes = %s' % ptypes)
for elems in etypes:
#if elems.type in ['CONROD']:
#self.nproperties += elems.n
self.nelements += elems.n
self.validate_nodes(elems)
for props in ptypes:
self.nproperties += props.n
self.model.log.debug('finished building %s' % self.__class__.__name__)
if self.nelements:
eids = check_duplicate('element_id', etypes, self.model.log)
self.element_ids = asarray(eids, dtype='int32')
self.element_ids.sort()
self.element_groups = build_groups(etypes, 'element_id', is_element=True)
#self.model.log.info('self.element_groups = %s' % self.element_groups)
else:
self.model.log.warning('no elements...')
if self.nproperties:
pids = check_duplicate('property_id', ptypes, self.model.log)
self.property_ids = asarray(pids, dtype='int32')
self.property_ids.sort()
self.property_groups = build_groups(ptypes, 'property_id')
self.model.log.info('self.property_groups = %s' % self.property_groups)
#print('*****self.element_ids =', self.element_ids)
#print('*****self.property_ids =', self.property_ids)
def get_elements(self, element_id):
type_map = {
'CELAS1' : self.elements_spring.celas1,
'CELAS2' : self.elements_spring.celas2,
'CELAS3' : self.elements_spring.celas3,
'CELAS4' : self.elements_spring.celas4,
#'CBUSH' : self.elements_bush.cbush,
#'CBUSH1D' : self.elements_bush.cbush1d,
#'CBUSH2D' : self.elements_bush.cbush2d,
'CBUSH' : self.cbush,
'CBUSH1D' : self.cbush1d,
'CBUSH2D' : self.cbush2d,
'CROD' : self.crod,
'CTUBE' : self.ctube,
'CONROD' : self.conrod,
'CSHEAR' : self.cshear,
'CTRIA3' : self.elements_shell.ctria3,
'CQUAD4' : self.elements_shell.cquad4,
'CTRIA6' : self.elements_shell.ctria6,
'CQUAD8' : self.elements_shell.cquad8,
'CTETRA4' : self.elements_solid.ctetra4,
'CPENTA6' : self.elements_solid.cpenta6,
'CHEXA8' : self.elements_solid.chexa8,
'CTETRA10' : self.elements_solid.ctetra10,
'CPENTA15' : self.elements_solid.cpenta15,
'CHEXA20' : self.elements_solid.chexa20,
}
out = []
for eid in element_id:
obj = None
for etype, eids in self.element_groups.items():
if eid in eids:
i = np.where(eid == eids)[0]
obj = type_map[etype][i]
out.append(obj)
return out
def get_element_properties(self, exclude_types=None):
if exclude_types is None:
exclude_types = []
element_objs = [
self.elements_spring.celas1, self.elements_spring.celas2,
self.elements_spring.celas3, self.elements_spring.celas4,
self.cshear,
self.crod, self.conrod, self.ctube,
self.cbar, self.cbeam,
self.cbush, self.cbush1d, self.cbush2d,
self.elements_shell.ctria3, self.elements_shell.cquad4,
self.elements_shell.ctria6, self.elements_shell.cquad8,
self.elements_solid.ctetra4, self.elements_solid.cpenta6, self.elements_solid.chexa8,
self.elements_solid.ctetra10, self.elements_solid.cpenta15, self.elements_solid.chexa20,
]
if exclude_types is None:
exclude_types = []
element_objs2 = []
for element_obj in element_objs:
if element_obj.type not in exclude_types:
element_objs2.append(element_obj)
element_objs = element_objs2
del element_objs2
# this isn't working...
#element_objs = [element_obj if element_obj.type not in exclude_types
#for element_obj in element_objs]
elements_without_properties = ['CELAS2', 'CELAS4', 'CONROD']
eids = np.hstack([element_obj.element_id for element_obj in element_objs])
pids = np.hstack([np.zeros(element_obj.n, dtype='int32')
if element_obj.type in elements_without_properties
else element_obj.property_id for element_obj in element_objs])
return element_objs, eids, pids
def get_element_ids_by_property_type(self, element_ids, exclude_types=None):
#self.model.log.debug('element_ids = %s' % element_ids)
Types, eids, pids = self.get_element_properties(exclude_types)
# remove undefined properties
#existing_pids = setdiff1d(unique(pids), self.property_ids, assume_unique=True)
#self.model.log.debug('pids = %s' % pids)
#self.model.log.debug('self.property_ids = %s' % self.property_ids)
#self.model.log.debug('existing_pids = %s' % existing_pids)
# make sure oids is unique
oids = np.hstack([Type.op2_id for Type in Types])
oids2 = np.unique(oids)
assert len(oids) == len(oids2), oids
oids = np.hstack([Type.op2_id * np.ones(Type.n, dtype='int32') for Type in Types])
i = np.argsort(eids)
#self.model.log.debug('i = %s' % i)
#self.model.log.debug('eids = %s len=%s' % (eids, len(eids)))
#self.model.log.debug('pids = %s len=%s' % (pids, len(pids)))
#self.model.log.debug('oids = %s len=%s' % (oids, len(oids)))
assert len(eids) == len(pids), 'len(eids)=%i len(pids)=%i' % (len(eids), len(pids))
assert len(eids) == len(oids), 'len(eids)=%i len(oids)=%i' % (len(eids), len(oids))
eids = eids[i]
pids = pids[i]
oids = oids[i]
data = np.vstack([eids, pids, oids]).T
#self.model.log.debug(data)
# drop extra elements
# for eids greater than the max allowable eid located at data[-1,0],
# we drop them
i_less = np.where(data[-1, 0] >= element_ids)[0]
element_ids = element_ids[i_less]
# drop more extra elements
# we're handling cases of skipped elements (e.g. CELASx cards)
# that have a sorted location in data, but no unique value
#print('++++++ %s' % element_ids)
#print('++++++ %s' % data[:, 0])
ie = np.unique(np.searchsorted(data[:, 0], element_ids))
#print('ie = %s' % ie)
#print('dataA \n%s' % data)
return data[ie, :]
#return data
def get_nodes(self, node_id, xyz_cid0, msg=''):
i = self.model.grid.get_node_index_by_node_id(node_id, msg=msg)
return xyz_cid0[i, :]
def _get_element_ids(self, element_ids_orig):
if element_ids_orig is None:
element_ids = self.element_ids
element_ids_orig = element_ids
#print('A %s' % element_ids)
else:
# remove elements that don't exist in the BDF
#print("self.element_ids = \n%s" % str(self.element_ids))
#print("element_ids = \n%s" % str(element_ids))
element_ids_orig = asarray(element_ids_orig)
            element_ids = np.intersect1d(element_ids_orig, self.element_ids)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import collections
import copy as cp
import math
from collections import OrderedDict
import os.path
import numpy as np
import time
import operator
import sys
import pickle
import os
import random
from datetime import datetime
from .Node import Node
from .utils import latin_hypercube, from_unit_cube
# from torch.quasirandom import SobolEngine
# import torch
class MCTS:
#############################################
def __init__(
self,
space,
sample_latent_bounds,
dims,
split_latent_converter,
sample_latent_converter,
split_latent_dims,
sample_latent_dims,
C_p=10,
sample_per_inner_alg=1,
split_metric: str = 'max',
kernel_type="rbf",
solver='cmaes',
# solver_options={},
cmaes_sigma_mult=1.,
use_gpr=True,
gamma_type="auto",
treeify_freq=1,
init_within_leaf='mean',
leaf_size=20,
splitter_type='kmeans',
normalize=True,
rng=np.random.RandomState(42),
split_use_predict=True,
verbose=False,
**kwargs):
'''
::solver: type=str, default='cmaes', choices=['cmaes'], help='leaf solver'
::init_within_leaf: type=str, default='mean', choices=['mean', 'random', 'max'], help='how to choose initial value within leaf for cmaes and gradient'
::leaf_size: type=int, default=20, help='min leaf size before splitting'
::split_type: type=str, default='kmeans', choices=['kmeans', 'linreg', 'value'], help='how to split nodes for LaMCTS. value = just split in half based on value'
'''
self.space = space
# = args
self.split_latent_converter = split_latent_converter
self.sample_latent_converter = sample_latent_converter
self.dims = dims
self.split_metric = split_metric
self.solver_type = solver
# ub, lb = sample_latent_bounds.ub, sample_latent_bounds.lb
self.cmaes_sigma_mult = cmaes_sigma_mult
self.use_gpr = use_gpr
self.treeify_freq = treeify_freq
self.init_within_leaf = init_within_leaf
# self.leaf_size = leaf_size
# self.split_type = split_type
# self.split_latent_dims = func.split_latent_converter.latent_dim if func.split_latent_converter is not None else dims
# self.sample_latent_dims = func.sample_latent_converter.latent_dim if args.latent_samples else dims
self.split_latent_dims = split_latent_dims
self.sample_latent_dims = sample_latent_dims
self.sample_latent_bounds = sample_latent_bounds
self.rng = rng
self.samples = []
self.f_samples = []
self.nodes = []
self.C_p = C_p
self.sample_per_inner_alg = sample_per_inner_alg
self.sample_per_inner_alg_count = 0
# self.lb = lb
# self.ub = ub
# self.ninits = ninits
# self.func = func
# self.curt_best_value = float("inf")
# self.curt_best_sample = None
self.best_value_trace = []
# self.sample_counter = 0
self.visualization = False
self.LEAF_SAMPLE_SIZE = leaf_size
self.kernel_type = kernel_type
self.gamma_type = gamma_type
# self.cmaes_sigma_mult = args.cmaes_sigma_mult
# self.solver_type = args.solver #solver can be 'bo' or 'turbo'
self.normalize = normalize
self.splitter_type = splitter_type
self.verbose = verbose
if self.verbose:
print("gamma_type:", gamma_type)
self.kwargs = kwargs
#we start the most basic form of the tree, 3 nodes and height = 1
self.split_use_predict = split_use_predict
root = Node(
parent=None,
sample_dims=self.sample_latent_dims,
split_dims=self.split_latent_dims,
true_dims=self.dims,
reset_id=True,
kernel_type=self.kernel_type,
cmaes_sigma_mult=self.cmaes_sigma_mult,
leaf_size=self.LEAF_SAMPLE_SIZE,
splitter_type=self.splitter_type,
split_metric=self.split_metric,
use_gpr=self.use_gpr,
gamma_type=self.gamma_type,
normalize=self.normalize,
verbose=self.verbose,
rng=self.rng,
split_use_predict=split_use_predict,
**kwargs)
self.nodes.append(root)
self.ROOT = root
self.CURT = self.ROOT
# self.init_train()
self.iterations_since_treeify = 0
def populate_training_data(self):
#only keep root
self.ROOT.obj_counter = 0
for node in self.nodes:
node.clear_data()
self.nodes.clear()
new_root = Node(
parent=None,
sample_dims=self.sample_latent_dims,
split_dims=self.split_latent_dims,
true_dims=self.dims,
reset_id=True,
kernel_type=self.kernel_type,
cmaes_sigma_mult=self.cmaes_sigma_mult,
leaf_size=self.LEAF_SAMPLE_SIZE,
splitter_type=self.splitter_type,
split_metric=self.split_metric,
use_gpr=self.use_gpr,
gamma_type=self.gamma_type,
normalize=self.normalize,
verbose=self.verbose,
rng=self.rng,
split_use_predict=self.split_use_predict,
**self.kwargs)
self.nodes.append(new_root)
self.ROOT = new_root
self.CURT = self.ROOT
self.ROOT.update_bag(self.latent_samples, self.split_vectors,
self.samples, self.f_samples)
def get_leaf_status(self):
status = []
for node in self.nodes:
if node.is_leaf() == True and len(
node.sample_X
) > self.LEAF_SAMPLE_SIZE and node.is_svm_splittable == True:
status.append(True)
else:
status.append(False)
        return np.array(status)
import numpy as np
import json
from collections import OrderedDict
from scipy.spatial import distance
from numpy.linalg import eigh
from matplotlib import pyplot as plt
import sklearn.metrics
import xgboost as xgb
from .. import pipeline as pipe
import numpy.random as random
import sys
import pandas as pd
np.random.seed(132)
def run_xgboost(dtrain, dtest):
xg_params = {
"objective": "binary:logistic",
"booster" : "gbtree",
"eval_metric" : "logloss",
"eta": random.uniform(0.01, 0.3),
"max_depth": random.randint(2, 4),
"subsample": random.uniform(0.5, 0.95),
"colsample_bytree": random.uniform(0.5, 0.95),
"silent": 1,
"seed": 0,
"nthread" : 5
}
num_boost_round = 1000
early_stopping_rounds = 25
evallist = [(dtest, 'test')]
    bst = xgb.train(xg_params, dtrain, num_boost_round, evals=evallist,
                    early_stopping_rounds=early_stopping_rounds)
#calculating predcition not necessary, score already saved
#predictions = bst.predict(dtest, ntree_limit = bst.best_ntree_limit)
#uses early stopping to determine optimal epoch
log_loss = bst.best_score
return log_loss, xg_params, bst
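# Example usage (hypothetical arrays, shown for illustration only):
#
#   dtrain = xgb.DMatrix(X_train, label=y_train)
#   dtest = xgb.DMatrix(X_test, label=y_test)
#   loss, params, booster = run_xgboost(dtrain, dtest)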
def logloss(prediction, label):
eps = 1e-7
    prediction = np.maximum(np.minimum(prediction, 1 - eps), eps)
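    # (Truncated in the source. The standard continuation of a clipped binary
    #  log-loss would be, as an assumption:
    #    return -np.mean(label * np.log(prediction)
    #                    + (1 - label) * np.log(1 - prediction)))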
import numpy as np
import warnings
class Gaussian:
def __init__(self, use_means=True, Q=None, mu=None):
self.use_means = use_means
self.Q = Q.copy() if Q is not None else None
self.mu = mu.copy() if mu is not None else None
def log_prob(self, X):
n, d = X.shape
if self.use_means:
Z = X - self.mu
else:
Z = X
pi_norm = - .5 * d * np.log(2 * np.pi)
log_det_q = .5 * np.linalg.slogdet(self.Q)[1]
zq = np.dot(Z, self.Q)
zqz = np.multiply(zq, Z).sum(axis=1)
m_dist = -.5 * zqz
return pi_norm + log_det_q + m_dist
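    # log_prob above is the multivariate normal log-density written with the
    # precision matrix Q = Sigma^{-1}:
    #   log N(x; mu, Q) = -(d/2) log(2*pi) + (1/2) log|Q| - (1/2)(x-mu)^T Q (x-mu)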
def update_from_weights(self, X, W_j, Q_estimator, warm_start=False):
S, m = self.get_sufficient_statistics(X, W_j)
if Q_estimator is None: # use normal MLE
self.Q = np.linalg.inv(S)
if self.use_means:
self.mu = m
return
# normalizing S to have 1's in the diag
sqrt_vars = np.sqrt(np.diag(S)).reshape(-1, 1)
inv_sqrt_vars = (sqrt_vars ** -1).reshape(-1, 1)
normed_S = inv_sqrt_vars * S * inv_sqrt_vars.T
if warm_start:
normed_Q = sqrt_vars * self.Q * sqrt_vars.T
else:
normed_Q = np.eye(S.shape[0])
Q_est_normed = Q_estimator.fit(normed_S, normed_Q)
self.Q = inv_sqrt_vars * Q_est_normed * inv_sqrt_vars.T
if self.use_means:
self.mu = m
def get_sufficient_statistics(self, X, W):
W_sum = W.sum()
if np.allclose(W_sum, 0):
# print("D", end="", flush=True)
warnings.warn("Empty Component, randomizing", RuntimeWarning)
            W = np.random.choice([0., 1.], X.shape[0], p=[.9, .1])
import numpy as np
import theano
import theano.tensor as T
from sfo.sfo import SFO
from skimage.filter import gabor_kernel
from skimage.transform import resize
def generate_data(ndim, nsamples, nfeatures):
"""Generate data by drawing samples that are a sparse combination of gabor features
"""
# build features
features = list()
for j in range(nfeatures):
        theta = np.pi * np.random.rand()
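        # (Truncated in the source. A plausible continuation builds each Gabor
        #  feature with skimage and flattens it to the sample dimension, e.g.:
        #    kernel = np.real(gabor_kernel(frequency=0.2, theta=theta))
        #    features.append(resize(kernel, (16, 16)).ravel())
        #  -- an assumption for illustration, not the original code.)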
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# fluctmatch --- https://github.com/tclick/python-fluctmatch
# Copyright (c) 2013-2017 The fluctmatch Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the New BSD license.
#
# Please cite your use of fluctmatch in published work:
#
# <NAME>, <NAME>, and <NAME>.
# Calculation of Enzyme Fluctuograms from All-Atom Molecular Dynamics
# Simulation. Meth Enzymology. 578 (2016), 327-342,
# doi:10.1016/bs.mie.2016.05.024.
#
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from future.builtins import (
super, )
import numpy as np
from MDAnalysis.core import selection
class BioIonSelection(selection.Selection):
"""Contains atoms commonly found in proteins.
"""
token = "bioion"
ion_atoms = np.array(["MG", "CAL", "MN", "FE", "CU", "ZN", "AG"])
def __init__(self, parser, tokens):
pass
def apply(self, group):
mask = np.in1d(group.names, self.ion_atoms)
return group[mask].unique
class WaterSelection(selection.Selection):
"""Contains atoms commonly found in water.
"""
token = "water"
water_atoms = np.array(["OW", "HW1", "HW2", "MW"])
def __init__(self, parser, tokens):
pass
def apply(self, group):
mask = np.in1d(group.names, self.water_atoms)
return group[mask].unique
class BackboneSelection(selection.BackboneSelection):
"""Contains all heavy atoms within a protein backbone including the terminal carboxyl oxygens.
"""
token = "backbone"
oxy_atoms = ["OXT", "OT1", "OT2"]
def apply(self, group):
mask = np.in1d(group.names,
np.concatenate([self.bb_atoms, self.oxy_atoms]))
mask &= np.in1d(group.resnames, self.prot_res)
return group[mask].unique
class HBackboneSelection(BackboneSelection):
"""Includes all atoms found within a protein backbone including hydrogens.
"""
token = "hbackbone"
hbb_atoms = np.array([
"H", "HN", "H1", "H2", "H3", "HT1", "HT2", "HT3", "HA", "HA1", "HA2",
"1HA", "2HA"
])
def apply(self, group):
mask = np.in1d(group.names,
np.concatenate(
[self.bb_atoms, self.oxy_atoms, self.hbb_atoms]))
mask &= np.in1d(group.resnames, self.prot_res)
return group[mask].unique
class CalphaSelection(selection.ProteinSelection):
"""Contains only the alpha-carbon of a protein.
"""
token = "calpha"
calpha = np.array(["CA"])
def apply(self, group):
mask = np.in1d(group.names, self.calpha)
mask &= np.in1d(group.resnames, self.prot_res)
return group[mask].unique
class HCalphaSelection(CalphaSelection):
"""Contains the alpha-carbon and alpha-hydrogens of a protein.
"""
token = "hcalpha"
    hcalpha = np.array(["HA", "HA1", "HA2", "1HA", "2HA"])
from functools import partial
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numba as nb
import numpy as np
import scipy.linalg as linalg
from jax import lax, jit
__all__ = ["make_parameters", "get_data"]
def _transition_function(x, dt):
""" Deterministic transition function used in the state space model
Parameters
----------
x: array_like
The current state
dt: float
Time step between observations
Returns
-------
out: array_like
The transitioned state
"""
w = x[-1]
predicate = jnp.abs(w) < 1e-6
coswt = jnp.cos(w * dt)
sinwt = jnp.sin(w * dt)
def true_fun(_):
return coswt, 0., sinwt, dt
def false_fun(_):
coswto = coswt - 1
return coswt, coswto / w, sinwt, sinwt / w
coswt, coswtopw, sinwt, sinwtpw = lax.cond(predicate, true_fun, false_fun, None)
F = jnp.array([[1, 0, sinwtpw, -coswtopw, 0],
[0, 1, coswtopw, sinwtpw, 0],
[0, 0, coswt, sinwt, 0],
[0, 0, -sinwt, coswt, 0],
[0, 0, 0, 0, 1]])
return F @ x
def _observation_function(x, s1, s2):
"""
Returns the observed angles as function of the state and the sensors locations
Parameters
----------
x: array_like
The current state
s1: array_like
The first sensor location
s2: array_like
The second sensor location
Returns
-------
y: array_like
The observed angles, the first component is the angle w.r.t. the first sensor, the second w.r.t the second.
"""
return jnp.array([jnp.arctan2(x[1] - s1[1], x[0] - s1[0]),
jnp.arctan2(x[1] - s2[1], x[0] - s2[0])])
@partial(jnp.vectorize, excluded=(1, 2), signature="(m)->(d)")
def inverse_bearings(observation, s1, s2):
"""
Inverse the bearings observation to the location as if there was no noise,
This is only used to provide an initial point for the linearization of the IEKS and ICKS.
Parameters
----------
observation: (2) array
The bearings observation
s1: (2) array
The first sensor position
s2: (2) array
The second sensor position
Returns
-------
out: (2) array
The inversed position of the state
"""
tan_theta = jnp.tan(observation)
A = jnp.array([[tan_theta[0], -1],
[tan_theta[1], -1]])
b = jnp.array([s1[0] * tan_theta[0] - s1[1],
s2[0] * tan_theta[1] - s2[1]])
return jnp.linalg.solve(A, b)
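# Sanity check for inverse_bearings (hypothetical values): noise-free bearings
# should invert back to the position that produced them.
#
#   s1, s2 = jnp.array([-1.5, 0.5]), jnp.array([1.0, 1.0])
#   state = jnp.array([2.0, 3.0, 0.0, 0.0, 0.0])
#   obs = _observation_function(state, s1, s2)
#   inverse_bearings(obs, s1, s2)  # ~ [2.0, 3.0]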
def make_parameters(qc, qw, r, dt, s1, s2):
""" Discretizes the model with continuous transition noise qc, for step-size dt.
The model is described in "Multitarget-multisensor tracking: principles and techniques" by
Bar-Shalom, Yaakov and <NAME>
Parameters
----------
qc: float
Transition covariance of the continuous SSM
qw: float
Transition covariance of the continuous SSM
r: float
Observation error standard deviation
dt: float
Discretization time step
s1: array_like
The location of the first sensor
s2: array_like
The location of the second sensor
Returns
-------
Q: array_like
The transition covariance matrix for the discrete SSM
R: array_like
The observation covariance matrix
observation_function: callable
The observation function
transition_function: callable
The transition function
"""
Q = jnp.array([[qc * dt ** 3 / 3, 0, qc * dt ** 2 / 2, 0, 0],
[0, qc * dt ** 3 / 3, 0, qc * dt ** 2 / 2, 0],
[qc * dt ** 2 / 2, 0, qc * dt, 0, 0],
[0, qc * dt ** 2 / 2, 0, qc * dt, 0],
[0, 0, 0, 0, dt * qw]])
R = r ** 2 * jnp.eye(2)
observation_function = jit(partial(_observation_function, s1=s1, s2=s2))
transition_function = jit(partial(_transition_function, dt=dt))
return Q, R, observation_function, transition_function
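# Example usage (illustrative values only):
#
#   s1, s2 = jnp.array([-1.5, 0.5]), jnp.array([1.0, 1.0])
#   Q, R, h, f = make_parameters(qc=0.1, qw=0.1, r=0.05, dt=0.01, s1=s1, s2=s2)
#   f(jnp.zeros(5))  # one deterministic transition step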
@nb.njit
def _get_data(x, dt, a_s, s1, s2, r, normals, observations, true_states):
for i, a in enumerate(a_s):
with nb.objmode(x='float32[::1]'):
F = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, a],
[0, 0, -a, 0]], dtype=np.float32)
x = linalg.expm(F * dt) @ x
y1 = np.arctan2(x[1] - s1[1], x[0] - s1[0]) + r * normals[i, 0]
y2 = np.arctan2(x[1] - s2[1], x[0] - s2[0]) + r * normals[i, 1]
        observations[i] = [y1, y2]
true_states[i] = np.concatenate((x, np.array([a])))
# return true_states, observations
def get_data(x0, dt, r, T, s1, s2, q=10., random_state=None):
"""
Parameters
----------
x0: array_like
true initial state
dt: float
time step for observations
r: float
observation model standard deviation
T: int
number of time steps
s1: array_like
The location of the first sensor
s2: array_like
The location of the second sensor
q: float
noise of the angular momentum
random_state: np.random.RandomState or int, optional
numpy random state
Returns
-------
ts: array_like
array of time steps
true_states: array_like
array of true states
observations: array_like
array of observations
"""
if random_state is None or isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
a_s = 1 + q * dt * np.cumsum(random_state.randn(T))
a_s = a_s.astype(np.float32)
s1 = np.asarray(s1, dtype=np.float32)
s2 = np.asarray(s2, dtype=np.float32)
x = np.copy(x0).astype(np.float32)
    observations = np.empty((T, 2), dtype=np.float32)
import random
import numpy as np
import pynmmso as nmmso
class Swarm:
"""
Represents a swarm in the NMMSO algorithm.
Arguments
---------
id : int
Id used to refer to the swarm
swarm_size : int
Maximum number of particles in the swarm
problem :
Instance of the problem class. Must implement get_bounds and fitness functions.
listener : subclass of nmmso.listeners.BaseListener
Listener object to receive notification of events. Optional.
Attributes
----------
id : int
A unique identification number of this swarm.
mode_location : numpy array
The location of this mode.
mode_value : float
The fitness of the mode location.
number_of_particles : int
Number of particles in the swarm.
history_locations : 2D Numpy array
The current locations of each particle in the swarm.
history_values : 1D Numpy array
The fitness values for current locations of each particle in the swarm.
velocities : 2D Numpy array
Current velocity of each particle in the swarm.
pbest_location : 2D Numpy array
The best location discovered for each particle.
pbest_value : 1D Numpy array
The fitness value associated with the best location for each particle in the swarm.
"""
def __init__(self, id, swarm_size, problem, listener=None):
self.id = id
self.swarm_size = swarm_size
self.problem = problem
self.listener = listener
self.mn = np.array(problem.get_bounds()[0])
self.mx = np.array(problem.get_bounds()[1])
self.changed = True
self.converged = False
self.num_dimensions = len(self.mn)
self.mode_location = None # Will be populated later on
self.new_location = None # Will be populated later on
self.mode_value = None # Will be populated later on
# Initialize locations for swarm elements
# current locations of swarm
self.history_locations = np.zeros((self.swarm_size, self.num_dimensions))
# current values of swarm
self.history_values = np.full(self.swarm_size, -np.inf)
# current best locations of swarm
self.pbest_locations = np.zeros((self.swarm_size, self.num_dimensions))
# current best values of swarm
self.pbest_values = np.full(self.swarm_size, -np.inf)
self.velocities = np.zeros((swarm_size, self.num_dimensions))
self.number_of_particles = 1
self.shifted_loc = None # Will be populated later on
self.dist = None # Will be populated later on
def set_initial_location(self):
"""Sets the initial location of a swarm."""
self.changed = True
self.new_location = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
# random initial velocities of swarm
self.velocities[0, :] = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn
def set_arbitrary_distance(self):
"""Set an arbitrary distance - this is done when we only have one swarm"""
self.dist = np.min(self.mx-self.mn)
def increment(self):
""" Increments the swarm. """
new_location = self.mn - 1
d = self.dist
shifted = False
omega = 0.1
reject = 0
r = random.randrange(self.swarm_size) # select particle at random to move
while np.sum(new_location < self.mn) > 0 or np.sum(new_location > self.mx) > 0:
# if swarm is not yet at capacity, simply add a new particle
if self.number_of_particles < self.swarm_size:
usp = nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0]
new_location = self.mode_location + usp * (d/2)
else:
# move an existing particle
shifted = True
self.shifted_loc = r
r1 = np.random.rand(self.num_dimensions)
r2 = np.random.rand(self.num_dimensions)
temp_vel = omega * self.velocities[self.shifted_loc, :] + \
2.0 * r1 * \
(self.mode_location - self.history_locations[self.shifted_loc, :]) + \
2.0 * r2 * \
(self.pbest_locations[self.shifted_loc, :] -
self.history_locations[self.shifted_loc, :])
if reject > 20:
# if we keep rejecting then put at extreme any violating design parameters
i_max = np.flatnonzero(
np.asarray(
self.history_locations[self.shifted_loc, :] + temp_vel > self.mx))
i_min = np.flatnonzero(
np.asarray(
self.history_locations[self.shifted_loc, :] + temp_vel < self.mn))
if i_max.size > 0:
temp_vel[i_max] = \
np.random.rand(i_max.size) * \
(self.mx[i_max] - self.history_locations[self.shifted_loc, i_max])
if i_min.size > 0:
temp_vel[i_min] = \
np.random.rand(i_min.size) * \
(self.history_locations[self.shifted_loc, i_min] - self.mn[i_min])
new_location = self.history_locations[self.shifted_loc, :] + temp_vel
reject = reject + 1
if shifted:
self.velocities[self.shifted_loc, :] = temp_vel
else:
# otherwise initialise velocity in sphere based on distance from gbest to next
# closest mode
self.number_of_particles = self.number_of_particles + 1
self.shifted_loc = self.number_of_particles - 1
temp_vel = self.mn - 1
reject = 0
while np.sum(temp_vel < self.mn) > 0 or np.sum(temp_vel > self.mx) > 0:
temp_vel = \
self.mode_location + \
nmmso.Nmmso.uniform_sphere_points(1, self.num_dimensions)[0] * (d / 2)
reject = reject + 1
if reject > 20: # resolve if keep rejecting
temp_vel = np.random.rand(self.num_dimensions)*(self.mx-self.mn) + self.mn
self.velocities[self.shifted_loc, :] = temp_vel
self.new_location = new_location
if self.listener is not None:
if shifted:
self.listener.swarm_moved_particle(self)
else:
self.listener.swarm_added_particle(self)
def initialise_with_uniform_crossover(self, swarm1, swarm2):
"""
Initialise a new swarm with the uniform crossover of the given swarms.
Arguments
---------
swarm1 : Swarm
swarm2 : Swarm
"""
self.new_location, _ = Swarm.uni(swarm1.mode_location, swarm2.mode_location)
self.evaluate_first()
self.changed = True
self.converged = False
def distance_to(self, swarm):
"""
Euclidean distance between this swarm and the given swarm, based on their mode locations.
Returns
-------
float
The distance between the two swarms.
"""
        return np.linalg.norm(self.mode_location - swarm.mode_location)
__doc__ = """Helical buckling convergence study, for detailed explanation refer to Gazzola et. al. R. Soc. 2018
section 3.4.1 """
import numpy as np
import sys
# FIXME without appending sys.path make it more generic
sys.path.append("../../")
from elastica import *
from examples.HelicalBucklingCase.helicalbuckling_postprocessing import (
analytical_solution,
envelope,
plot_helicalbuckling,
)
from examples.convergence_functions import plot_convergence, calculate_error_norm
class HelicalBucklingSimulator(BaseSystemCollection, Constraints, Forcing):
pass
# Options
PLOT_FIGURE = True
SAVE_FIGURE = True
SAVE_RESULTS = False
def simulate_helicalbucklin_beam_with(
elements=10, SAVE_FIGURE=False, PLOT_FIGURE=False
):
helicalbuckling_sim = HelicalBucklingSimulator()
# setting up test params
n_elem = elements
start = np.zeros((3,))
direction = np.array([0.0, 0.0, 1.0])
normal = np.array([0.0, 1.0, 0.0])
base_length = 100.0
base_radius = 0.35
base_area = np.pi * base_radius ** 2
density = 1.0 / (base_area)
nu = 0.01
E = 1e6
slack = 3
number_of_rotations = 27
# For shear modulus of 1e4, nu is 99!
poisson_ratio = 99
shear_matrix = np.repeat(1e5 * np.identity((3))[:, :, np.newaxis], n_elem, axis=2)
temp_bend_matrix = np.zeros((3, 3))
    np.fill_diagonal(temp_bend_matrix, [1.345, 1.345, 0.789])
from scipy.spatial import ConvexHull
import numpy as np
from scipy.integrate import simps
from scipy import signal
import antropy as ant
import scipy.stats
import nolds
from package import diffusion_stabilogram
from package import recurrence_quantification_analysis
from package import fractal_dimension
## NOTE: Recordings from Bertec Acquire have the following specifications:
#Row 1 = Time
#Row 2 = Fz
#Row 3 = Mx
#Row 4 = My
#Row 5 = CoPx = CoP_ML
#Row 6 = CoPy = CoP_AP
#Note. CoPx = -My/Fz
#Note. CoPy = Mx/Fz
def _recenter(data):
"""De-means the data"""
data = np.array(data)
return data - data.mean()
def _delta(data):
"""Gets the difference in data, i.e., delta[i] = x[i+1] - x[i]"""
d1 = np.array(data[:-1])
d2 = np.array(data[1:])
return d2 - d1
def _eig(data):
"""Returns eigenvectors and eigenvalues from the x y"""
def _confidence_ellipse(x,y):
N = len(x)
corr = np.zeros([2,2])
corr[0,0] = sum(x ** 2)
corr[1,1] = sum(y ** 2)
corr[0,1] = corr[1,0] = sum(x * y)
w,v = np.linalg.eig(corr)
major_idx = np.argmax(w)
minor_idx = np.argmin(w)
major_radius = np.sqrt(w[major_idx]/(N-1))
minor_radius = np.sqrt(w[minor_idx]/(N-1))
major_axis=v[:,major_idx]
minor_axis=v[:,minor_idx]
return major_radius,minor_radius,major_axis,minor_axis
def _get_psd(data,method=None):
T = data[0][1] - data[0][0]
fs = 1/T
if method == 'multitaper':
from mne.time_frequency import psd_array_multitaper
psd_ML, f_ML = psd_array_multitaper(data[4], fs, adaptive=True, normalization='full', verbose=0)
psd_AP, f_AP = psd_array_multitaper(data[5], fs, adaptive=True, normalization='full', verbose=0)
elif method == None:
f_ML, psd_ML = signal.periodogram(data[4], fs=fs)
f_AP, psd_AP = signal.periodogram(data[5], fs=fs)
else:
print("Please enter a valid method. Either 'multitaper' or None")
return
return psd_ML, psd_AP, f_ML, f_AP
####################################
def get_area95(data):
"""following https://www1.udel.edu/biology/rosewc/kaap686/reserve/cop/center%20of%20position%20conf95.pdf """
x, y = _recenter(data[4]), _recenter(data[5])
major_radius,minor_radius,_,_ = _confidence_ellipse(x, y)
area95 = 5.991 * np.pi * major_radius * minor_radius
return area95
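# Note: the 5.991 factor above is the 95% quantile of the chi-squared
# distribution with 2 degrees of freedom (scipy.stats.chi2.ppf(0.95, 2)),
# which scales the one-sigma radii to a 95% confidence ellipse.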
def get_swayarea(data):
"""Returns sway area of the stabilogram. Defined by the convex hull of all points"""
cop_x = data[4]
cop_y = data[5]
return ConvexHull(list(zip(cop_x,cop_y))).volume #Volume = area for a 2d shape
def get_area95majoraxis(data):
"""Returns the angle of the major axis wrt the x axis, from the area95 ellipse"""
x, y = _recenter(data[4]), _recenter(data[5])
_,_,major_axis,minor_axis = _confidence_ellipse(x, y)
vector_1 = [1,0] #x axis
vector_2 = major_axis
unit_vector_1 = vector_1 / np.linalg.norm(vector_1)
unit_vector_2 = vector_2 / np.linalg.norm(vector_2)
dot_product = np.dot(unit_vector_1, unit_vector_2)
angle = np.degrees(np.arccos(dot_product))
return angle
def get_area95_axis_length(data):
"""Returns the major and minor axis lengths from the area95 ellipse"""
x, y = _recenter(data[4]), _recenter(data[5])
major_radius,minor_radius,_,_ = _confidence_ellipse(x, y)
major_axis_length = np.sqrt(5.991)*major_radius*2
minor_axis_length = np.sqrt(5.991)*minor_radius*2
return major_axis_length, minor_axis_length
def get_area95_minoraxis_tangent(data):
"""Is extremely poorly defined in doi: 10.1002/mds.25449. Is left blank here"""
return None
def get_markedarea(data):
"""The calculation of the surface is carried out graphically with a res-
olution of 0.0025 cm 2 .
Continuous triangles from the mean
value of all measurement values to the last measurement point
to the current measurement point are calculated. Points on the
grid which overlap numerous times are not counted more than
once (measured in square meters).
POORLY DEFINED. Is this alpha shape or something?
"""
return
def get_area90_length(data):
"""Is very poorly defined in the corresponding paper (doi: 10.1123/mcj.6.3.246). We are assuming that this is simply the 90% confidence interval in ML and AP directions"""
x, y = _recenter(data[4]), _recenter(data[5])
confidence = 0.9
n = len(x)
std_err_x = scipy.stats.sem(x)
interval_x = std_err_x * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
CI_90_ML = interval_x * 2
n = len(y)
std_err_y = scipy.stats.sem(y)
interval_y = std_err_y * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
CI_90_AP = interval_y * 2
return CI_90_ML, CI_90_AP
def mean_confidence_interval(data, confidence=0.95):
    a = 1.0 * np.array(data)
###############################################################################
# Reader for CINE files produced by Vision Research Phantom Software
# Author: <NAME>
# <EMAIL>
# Modified by <NAME> (<EMAIL>)
# Added to PIMS by <NAME> (<EMAIL>)
# Modified by <NAME>
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from pims.frame import Frame
from pims.base_frames import FramesSequence, index_attr
from pims.utils.misc import FileLocker
import time
import struct
import numpy as np
from numpy import array, frombuffer, where
from threading import Lock
import datetime
import hashlib
import sys
import warnings
from collections.abc import Iterable
__all__ = ('Cine', )
# '<' for little endian (cine documentation)
def _build_struct(dtype):
return struct.Struct(str("<" + dtype))
FRACTION_MASK = (2**32-1)
MAX_INT = 2**32
# Harmonized/simplified cine file data types with Python struct doc
UINT8 = 'B'
CHAR = 'b'
UINT16 = 'H'
INT16 = 'h'
BOOL = 'i'
UINT32 = 'I'
INT32 = 'i'
INT64 = 'q'
FLOAT = 'f'
DOUBLE = 'd'
TIME64 = 'Q'
RECT = '4i'
WBGAIN = '2f'
IMFILTER = '28i'
# TODO: get correct format for TrigTC
TC = '8s'
CFA_NONE = 0 # gray sensor
CFA_VRI = 1 # gbrg/rggb sensor
CFA_VRIV6 = 2 # bggr/grbg sensor
CFA_BAYER = 3 # gb/rg sensor
CFA_BAYERFLIP = 4 #rg/gb sensor
TAGGED_FIELDS = {
1000: ('ang_dig_sigs', ''),
1001: ('image_time_total', TIME64),
1002: ('image_time_only', TIME64),
1003: ('exposure_only', UINT32),
1004: ('range_data', ''),
1005: ('binsig', ''),
1006: ('anasig', ''),
1007: ('time_code', '')}
HEADER_FIELDS = [
('type', '2s'),
('header_size', UINT16),
('compression', UINT16),
('version', UINT16),
('first_movie_image', INT32),
('total_image_count', UINT32),
('first_image_no', INT32),
('image_count', UINT32),
# Offsets of following sections
('off_image_header', UINT32),
('off_setup', UINT32),
('off_image_offsets', UINT32),
('trigger_time', TIME64),
]
BITMAP_INFO_FIELDS = [
('bi_size', UINT32),
('bi_width', INT32),
('bi_height', INT32),
('bi_planes', UINT16),
('bi_bit_count', UINT16),
('bi_compression', UINT32),
('bi_image_size', UINT32),
('bi_x_pels_per_meter', INT32),
('bi_y_pels_per_meter', INT32),
('bi_clr_used', UINT32),
('bi_clr_important', UINT32),
]
SETUP_FIELDS = [
('frame_rate_16', UINT16),
('shutter_16', UINT16),
('post_trigger_16', UINT16),
('frame_delay_16', UINT16),
('aspect_ratio', UINT16),
('contrast_16', UINT16),
('bright_16', UINT16),
('rotate_16', UINT8),
('time_annotation', UINT8),
('trig_cine', UINT8),
('trig_frame', UINT8),
('shutter_on', UINT8),
('description_old', '121s'),
('mark', '2s'),
('length', UINT16),
('binning', UINT16),
('sig_option', UINT16),
('bin_channels', INT16),
('samples_per_image', UINT8),
] + [('bin_name{:d}'.format(i), '11s') for i in range(8)] + [
('ana_option', UINT16),
('ana_channels', INT16),
('res_6', UINT8),
('ana_board', UINT8),
] + [('ch_option{:d}'.format(i), INT16) for i in range(8)] + [
] + [('ana_gain{:d}'.format(i), FLOAT) for i in range(8)] + [
] + [('ana_unit{:d}'.format(i), '6s') for i in range(8)] + [
] + [('ana_name{:d}'.format(i), '11s') for i in range(8)] + [
('i_first_image', INT32),
('dw_image_count', UINT32),
('n_q_factor', INT16),
('w_cine_file_type', UINT16),
] + [('sz_cine_path{:d}'.format(i), '65s') for i in range(4)] + [
('b_mains_freq', UINT16),
('b_time_code', UINT8),
('b_priority', UINT8),
('w_leap_sec_dy', UINT16),
('d_delay_tc', DOUBLE),
('d_delay_pps', DOUBLE),
('gen_bits', UINT16),
('res_1', INT32),
('res_2', INT32),
('res_3', INT32),
('im_width', UINT16),
('im_height', UINT16),
('edr_shutter_16', UINT16),
('serial', UINT32),
('saturation', INT32),
('res_5', UINT8),
('auto_exposure', UINT32),
('b_flip_h', BOOL),
('b_flip_v', BOOL),
('grid', UINT32),
('frame_rate', UINT32),
('shutter', UINT32),
('edr_shutter', UINT32),
('post_trigger', UINT32),
('frame_delay', UINT32),
('b_enable_color', BOOL),
('camera_version', UINT32),
('firmware_version', UINT32),
('software_version', UINT32),
('recording_time_zone', INT32),
('cfa', UINT32),
('bright', INT32),
('contrast', INT32),
('gamma', INT32),
('res_21', UINT32),
('auto_exp_level', UINT32),
('auto_exp_speed', UINT32),
('auto_exp_rect', RECT),
('wb_gain', '8f'),
('rotate', INT32),
('wb_view', WBGAIN),
('real_bpp', UINT32),
('conv_8_min', UINT32),
('conv_8_max', UINT32),
('filter_code', INT32),
('filter_param', INT32),
('uf', IMFILTER),
('black_cal_sver', UINT32),
('white_cal_sver', UINT32),
('gray_cal_sver', UINT32),
('b_stamp_time', BOOL),
('sound_dest', UINT32),
('frp_steps', UINT32),
] + [('frp_img_nr{:d}'.format(i), INT32) for i in range(16)] + [
] + [('frp_rate{:d}'.format(i), UINT32) for i in range(16)] + [
] + [('frp_exp{:d}'.format(i), UINT32) for i in range(16)] + [
('mc_cnt', INT32),
] + [('mc_percent{:d}'.format(i), FLOAT) for i in range(64)] + [
('ci_calib', UINT32),
('calib_width', UINT32),
('calib_height', UINT32),
('calib_rate', UINT32),
('calib_exp', UINT32),
('calib_edr', UINT32),
('calib_temp', UINT32),
] + [('header_serial{:d}'.format(i), UINT32) for i in range(4)] + [
('range_code', UINT32),
('range_size', UINT32),
('decimation', UINT32),
('master_serial', UINT32),
('sensor', UINT32),
('shutter_ns', UINT32),
('edr_shutter_ns', UINT32),
('frame_delay_ns', UINT32),
('im_pos_xacq', UINT32),
('im_pos_yacq', UINT32),
('im_width_acq', UINT32),
('im_height_acq', UINT32),
('description', '4096s'),
('rising_edge', BOOL),
('filter_time', UINT32),
('long_ready', BOOL),
('shutter_off', BOOL),
('res_4', '16s'),
('b_meta_WB', BOOL),
('hue', INT32),
('black_level', INT32),
('white_level', INT32),
('lens_description', '256s'),
('lens_aperture', FLOAT),
('lens_focus_distance', FLOAT),
('lens_focal_length', FLOAT),
('f_offset', FLOAT),
('f_gain', FLOAT),
('f_saturation', FLOAT),
('f_hue', FLOAT),
('f_gamma', FLOAT),
('f_gamma_R', FLOAT),
('f_gamma_B', FLOAT),
('f_flare', FLOAT),
('f_pedestal_R', FLOAT),
('f_pedestal_G', FLOAT),
('f_pedestal_B', FLOAT),
('f_chroma', FLOAT),
('tone_label', '256s'),
('tone_points', INT32),
('f_tone', ''.join(32*['2f'])),
('user_matrix_label', '256s'),
('enable_matrices', BOOL),
('f_user_matrix', '9'+FLOAT),
('enable_crop', BOOL),
('crop_left_top_right_bottom', '4i'),
('enable_resample', BOOL),
('resample_width', UINT32),
('resample_height', UINT32),
('f_gain16_8', FLOAT),
('frp_shape', '16'+UINT32),
('trig_TC', TC),
('f_pb_rate', FLOAT),
('f_tc_rate', FLOAT),
('cine_name', '256s')
]
# from VR doc: This field is maintained for compatibility with old versions but
# a new field was added for that information. The new field can be larger or may
# have a different measurement unit.
UPDATED_FIELDS = {
'frame_rate_16': 'frame_rate',
'shutter_16': 'shutter_ns',
'post_trigger_16': 'post_trigger',
'frame_delay_16': 'frame_delay_ns',
'edr_shutter_16': 'edr_shutter_ns',
'saturation': 'f_saturation',
'shutter': 'shutter_ns',
'edr_shutter': 'edr_shutter_ns',
'frame_delay': 'frame_delay_ns',
'bright': 'f_offset',
'contrast': 'f_gain',
'gamma': 'f_gamma',
'conv_8_max': 'f_gain16_8',
'hue': 'f_hue',
}
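# Illustrative sketch (not part of the original reader): when looking up a
# legacy setup value, prefer its replacement field whenever one is defined
# in UPDATED_FIELDS above.
def _updated_value(setup, key):
    return setup.get(UPDATED_FIELDS.get(key, key), setup.get(key))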
# from VR doc: to be ignored, not used anymore
TO_BE_IGNORED_FIELDS = {
'contrast_16': 'res_7',
'bright_16': 'res_8',
'rotate_16': 'res_9',
'time_annotation': 'res_10',
'trig_cine': 'res_11',
'shutter_on': 'res_12',
'binning': 'res_13',
'b_mains_freq': 'res_14',
'b_time_code': 'res_15',
'b_priority': 'res_16',
'w_leap_sec_dy': 'res_17',
'd_delay_tc': 'res_18',
'd_delay_pps': 'res_19',
'gen_bits': 'res_20',
'conv_8_min': '',
}
# from VR doc: last setup field appearing in software version
# TODO: keep up-to-date with newer and more precise doc, if available
END_OF_SETUP = {
551: 'software_version',
552: 'recording_time_zone',
578: 'rotate',
605: 'b_stamp_time',
606: 'mc_percent63',
    607: 'header_serial3',
614: 'decimation',
624: 'master_serial',
625: 'sensor',
631: 'frame_delay_ns',
637: 'description',
671: 'hue',
691: 'lens_focal_length',
693: 'f_gain16_8',
701: 'f_tc_rate',
702: 'cine_name',
}
class Cine(FramesSequence):
"""Read cine files
Read cine files, the out put from Vision Research high-speed phantom
cameras. Support uncompressed monochrome and color files.
Nominally thread-safe, but this assertion is not tested.
Parameters
----------
filename : string
Path to cine (or chd) file.
Notes
-----
For a .chd file, this class only reads the header, not the images.
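    Examples
    --------
    >>> # 'movie.cine' is a hypothetical path, for illustration only
    >>> frames = Cine('movie.cine')
    >>> frames[0]  # first frame, as a pims Frame with metadata attached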
"""
# TODO: Unit tests using a small sample cine file.
@classmethod
def class_exts(cls):
return {'cine'} | super(Cine, cls).class_exts()
propagate_attrs = ['frame_shape', 'pixel_type', 'filename', 'frame_rate',
'get_fps', 'compression', 'cfa', 'off_set']
def __init__(self, filename):
py_ver = sys.version_info
super(Cine, self).__init__()
self.f = open(filename, 'rb')
self._filename = filename
### HEADER
self.header_dict = self._read_header(HEADER_FIELDS)
self.bitmapinfo_dict = self._read_header(BITMAP_INFO_FIELDS,
self.off_image_header)
self.setup_fields_dict = self._read_header(SETUP_FIELDS, self.off_setup)
self.setup_fields_dict = self.clean_setup_dict()
self._width = self.bitmapinfo_dict['bi_width']
self._height = self.bitmapinfo_dict['bi_height']
self._pixel_count = self._width * self._height
# Allows Cine object to be accessed from multiple threads!
self.file_lock = Lock()
self._hash = None
self._im_sz = (self._width, self._height)
# sort out the data type by reading the meta-data
if self.bitmapinfo_dict['bi_bit_count'] in (8, 24):
self._data_type = 'u1'
else:
self._data_type = 'u2'
self.tagged_blocks = self._read_tagged_blocks()
self.frame_time_stamps = self.tagged_blocks['image_time_only']
self.all_exposures = self.tagged_blocks['exposure_only']
self.stack_meta_data = dict()
self.stack_meta_data.update(self.bitmapinfo_dict)
self.stack_meta_data.update({k: self.setup_fields_dict[k]
for k in set(('trig_frame',
'gamma',
'frame_rate',
'shutter_ns'
)
)
})
self.stack_meta_data.update({k: self.header_dict[k]
for k in set(('first_image_no',
'image_count',
'total_image_count',
'first_movie_image'
)
)
})
self.stack_meta_data['trigger_time'] = self.trigger_time
### IMAGES
        # Seek to the image-offsets section to test for EOF...
        self.f.seek(self.off_image_offsets)
        if self.f.read(1) != b'':
            # ...and if not at EOF, read the table of image locations
self.image_locations = self._unpack('%dQ' % self.image_count,
self.off_image_offsets)
if type(self.image_locations) not in (list, tuple):
self.image_locations = [self.image_locations]
# TODO: add support for reading sequence within the same framework, when data
# has been saved in another format (.tif, image sequence, etc)
def clean_setup_dict(self):
r"""Clean setup dictionary by removing newer fields, when compared to the
software version, and trailing null character b'\x00' in entries.
Notes
-----
The method is called after building the setup from the raw cine header.
It can be overridden to match more specific purposes (e.g. filtering
out TO_BE_IGNORED_ and UPDATED_FIELDS).
See also
--------
`Vision Research Phantom documentation <http://phantomhighspeed-knowledge.force.com/servlet/fileField?id=0BE1N000000kD2i>`_
"""
setup = self.setup_fields_dict.copy()
# End setup at correct field (according to doc)
versions = sorted(END_OF_SETUP.keys())
fields = [v[0] for v in SETUP_FIELDS]
v = setup['software_version']
# Get next field where setup is known to have ended, according to VR
try:
v_up = versions[sorted(where(array(versions) >= v)[0])[0]]
last_field = END_OF_SETUP[v_up]
for k in fields[fields.index(last_field)+1:]:
del setup[k]
except IndexError:
# Or go to the end (waiting for updated documentation)
pass
# Remove blank characters
setup = _convert_null_byte(setup)
# Filter out 'res_' (reserved/obsolete) fields
#k_res = [k for k in setup.keys() if k.startswith('res_')]
#for k in k_res:
# del setup[k]
# Format f_tone properly
if 'f_tone' in setup.keys():
tone = setup['f_tone']
setup['f_tone'] = tuple((tone[2*k], tone[2*k+1])\
for k in range(setup['tone_points']))
return setup
@property
def filename(self):
return self._filename
@property
def frame_rate(self):
"""Frame rate (setting in Phantom PCC software) (Hz).
May differ from computed average one.
"""
return self.setup_fields_dict['frame_rate']
@property
def frame_rate_avg(self):
"""Actual frame rate, averaged on frame timestamps (Hz)."""
return self.get_frame_rate_avg()
# use properties for things that should not be changeable
@property
def cfa(self):
return self.setup_fields_dict['cfa']
@property
def compression(self):
return self.header_dict['compression']
@property
def pixel_type(self):
        return np.dtype(self._data_type)
"""
This module contains a set of functions for vectorized string
operations and methods.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray, it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Some methods will only be available if the corresponding string method is
available in your version of Python.
The preferred alias for `defchararray` is `numpy.char`.
"""
import functools
import sys
from .numerictypes import (
string_, unicode_, integer, int_, object_, bool_, character)
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.compat import asbytes
import numpy
__all__ = [
'equal', 'not_equal', 'greater_equal', 'less_equal',
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
'array', 'asarray'
]
_globalvar = 0
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.char')
def _use_unicode(*args):
"""
Helper function for determining the output type of some string
operations.
For an operation on two ndarrays, if at least one is unicode, the
result should be unicode.
"""
for x in args:
if (isinstance(x, str) or
                issubclass(numpy.asarray(x).dtype.type, unicode_)):
            return unicode_
    return string_
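# For instance, _use_unicode(numpy.array(['a'])) picks unicode_, while
# _use_unicode(numpy.array([b'a'])) falls through to string_.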
import numpy as np
import tensorflow as tf
import ants
def hippmapp3r_segmentation(t1,
do_preprocessing=True,
antsxnet_cache_directory=None,
verbose=False):
"""
Perform HippMapp3r (hippocampal) segmentation described in
https://www.ncbi.nlm.nih.gov/pubmed/31609046
with models and architecture ported from
https://github.com/mgoubran/HippMapp3r
Additional documentation and attribution resources found at
https://hippmapp3r.readthedocs.io/en/latest/
Preprocessing consists of:
* n4 bias correction and
* brain extraction
The input T1 should undergo the same steps. If the input T1 is the raw
T1, these steps can be performed by the internal preprocessing, i.e. set
do_preprocessing = True
Arguments
---------
t1 : ANTsImage
input image
do_preprocessing : boolean
See description above.
antsxnet_cache_directory : string
Destination directory for storing the downloaded template and model weights.
        Since these can be reused, if this is None, the data will be downloaded to
        ~/.keras/ANTsXNet/.
verbose : boolean
Print progress to the screen.
Returns
-------
ANTs labeled hippocampal image.
Example
-------
>>> mask = hippmapp3r_segmentation(t1)
"""
from ..architectures import create_hippmapp3r_unet_model_3d
from ..utilities import preprocess_brain_image
from ..utilities import get_pretrained_network
from ..utilities import get_antsxnet_data
if t1.dimension != 3:
raise ValueError( "Image dimension must be 3." )
    if antsxnet_cache_directory is None:
antsxnet_cache_directory = "ANTsXNet"
if verbose == True:
print("************* Preprocessing ***************")
print("")
t1_preprocessed = t1
if do_preprocessing == True:
t1_preprocessing = preprocess_brain_image(t1,
truncate_intensity=None,
do_brain_extraction=True,
template=None,
do_bias_correction=True,
do_denoising=False,
antsxnet_cache_directory=antsxnet_cache_directory,
verbose=verbose)
t1_preprocessed = t1_preprocessing["preprocessed_image"] * t1_preprocessing['brain_mask']
if verbose == True:
print("************* Initial stage segmentation ***************")
print("")
# Normalize to mprage_hippmapp3r space
if verbose == True:
print(" HippMapp3r: template normalization.")
template_file_name_path = get_antsxnet_data("mprage_hippmapp3r", antsxnet_cache_directory=antsxnet_cache_directory)
template_image = ants.image_read(template_file_name_path)
registration = ants.registration(fixed=template_image, moving=t1_preprocessed,
type_of_transform="antsRegistrationSyNQuick[t]", verbose=verbose)
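    # Per the antsRegistrationSyN naming convention, the "[t]" suffix selects a
    # quick translation-only transform, so the brain is only shifted into
    # template space here.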
image = registration['warpedmovout']
transforms = dict(fwdtransforms=registration['fwdtransforms'],
invtransforms=registration['invtransforms'])
    # Threshold at 10% of the robust range (2nd-98th percentile of the
    # non-zero voxels, as in fslmaths' robust range)
if verbose == True:
print(" HippMapp3r: threshold.")
image_array = image.numpy()
image_robust_range = np.quantile(image_array[np.where(image_array != 0)], (0.02, 0.98))
threshold_value = 0.10 * (image_robust_range[1] - image_robust_range[0]) + image_robust_range[0]
thresholded_mask = ants.threshold_image(image, -10000, threshold_value, 0, 1)
thresholded_image = image * thresholded_mask
# Standardize image
if verbose == True:
print(" HippMapp3r: standardize.")
mean_image = np.mean(thresholded_image[thresholded_mask==1])
sd_image = np.std(thresholded_image[thresholded_mask==1])
image_normalized = (image - mean_image) / sd_image
image_normalized = image_normalized * thresholded_mask
# Trim and resample image
if verbose == True:
print(" HippMapp3r: trim and resample to (160, 160, 128).")
image_cropped = ants.crop_image(image_normalized, thresholded_mask, 1)
shape_initial_stage = (160, 160, 128)
image_resampled = ants.resample_image(image_cropped, shape_initial_stage, use_voxels=True, interp_type=1)
if verbose == True:
print(" HippMapp3r: generate first network and download weights.")
model_initial_stage = create_hippmapp3r_unet_model_3d((*shape_initial_stage, 1), do_first_network=True)
initial_stage_weights_file_name = get_pretrained_network("hippMapp3rInitial", antsxnet_cache_directory=antsxnet_cache_directory)
model_initial_stage.load_weights(initial_stage_weights_file_name)
if verbose == True:
print(" HippMapp3r: prediction.")
data_initial_stage = np.expand_dims(image_resampled.numpy(), axis=0)
data_initial_stage = np.expand_dims(data_initial_stage, axis=-1)
mask_array = model_initial_stage.predict(data_initial_stage, verbose=verbose)
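    # The model returns a soft probability map on the (160, 160, 128) grid;
    # the next lines resample it back to the working image and binarize at 0.5.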
mask_image_resampled = ants.copy_image_info(image_resampled, ants.from_numpy(np.squeeze(mask_array)))
mask_image = ants.resample_image(mask_image_resampled, image.shape, use_voxels=True, interp_type=0)
mask_image[mask_image >= 0.5] = 1
mask_image[mask_image < 0.5] = 0
#########################################
#
# Perform refined (stage 2) segmentation
#
if verbose == True:
print("")
print("")
print("************* Refine stage segmentation ***************")
print("")
mask_array = np.squeeze(mask_array)
centroid_indices = np.where(mask_array == 1)
centroid = np.zeros((3,))
centroid[0] = centroid_indices[0].mean()
centroid[1] = centroid_indices[1].mean()
centroid[2] = centroid_indices[2].mean()
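    # The mask's center of mass anchors the (112, 112, 64) crop that the
    # refine-stage network will see.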
shape_refine_stage = (112, 112, 64)
lower = (np.floor(centroid - 0.5 * np.array(shape_refine_stage)) - 1).astype(int)
    upper = (lower + np.array(shape_refine_stage)).astype(int)