prompt
stringlengths 19
879k
| completion
stringlengths 3
53.8k
| api
stringlengths 8
59
|
---|---|---|
"""
======================
Find Photodiode Events
======================
In this example, we use ``pd-parser`` to find photodiode events and
align them to behavior. Then, we save the data to BIDS format.
"""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
###############################################################################
# Simulate data and use it to make a raw object
#
# We'll make an `mne.io.Raw object` so that we can save out some random
# data with a photodiode event channel in it in `fif` format (a commonly used
# electrophysiology data format).
import os.path as op
import numpy as np
import mne
from mne.utils import _TempDir
import pd_parser
from pd_parser.parse_pd import _read_raw, _to_tsv
out_dir = _TempDir()
print(f'After running this example, you can find the data here: {out_dir}')
# simulate photodiode data
n_events = 300
prop_corrupted = 0.01
raw, beh, events, corrupted_indices = \
pd_parser.simulate_pd_data(n_events=n_events,
prop_corrupted=prop_corrupted)
# make fake electrophysiology data
info = mne.create_info(['ch1', 'ch2', 'ch3'], raw.info['sfreq'],
['seeg'] * 3)
raw2 = mne.io.RawArray(np.random.random((3, raw.times.size)) * 1e-6, info)
raw2.info['lowpass'] = raw.info['lowpass'] # these must match to combine
raw.add_channels([raw2])
# bids needs these data fields
raw.info['dig'] = None
raw.info['line_freq'] = 60
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname)
# roundtrip so that raw is properly loaded from disk and has a filename
raw = _read_raw(fname)
###############################################################################
# Make behavior data
#
# We'll make a dictionary with lists for the events that are time-stamped when
# the photodiode was turned on and other events relative to those events.
# We'll add some noise to the time-stamps so that we can see how behavior
# might look in an experimental setting.
# Let's make a task where there is a fixation stimulus, then a go cue,
# and a then response as an example.
| np.random.seed(12) | numpy.random.seed |
import pandas as pd
import tqdm, os, glob, json, re, time
import numpy as np
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
import enchant
import pickle as pkl
BASEPATH = '/Users/Janjua/Desktop/Projects/Octofying-COVID19-Literature/dataset'
stop_words = set(stopwords.words('english'))
engDict = enchant.Dict("en_US")
root_path = '/Users/Janjua/Desktop/Projects/Octofying-COVID19-Literature/dataset/CORD-19-research-challenge/'
def retrieve_data_from_json(file_path):
    """
    Read a CORD-19 paper .json file and return its key fields.

    # Arguments:
        file_path: the path to the .json file
    # Returns:
        (paper_id, abstract, full_text) -- abstract and full_text are the
        str() representation of the list of section texts (callers strip
        the surrounding brackets later with [1:-1]).
    """
    with open(file_path) as file:
        # json.load reads directly from the file object; the previous
        # json.loads(file.read()) round-trip was redundant
        data = json.load(file)
    # str(list-of-texts) on purpose -- downstream slicing relies on it;
    # the dead "abstract, full_text = [], []" initializers were removed
    abstract = str([x['text'] for x in data['abstract']])
    full_text = str([x['text'] for x in data['body_text']])
    paper_id = data['paper_id']
    return (paper_id, abstract, full_text)
def prepare_dataset():
    """
    Reads the downloaded .csv file and performs some pre-processing on the data.

    # Returns: nothing; writes BASEPATH/COVID_19_Lit.csv with cleaned data
        columns (paper_id, abstract, body_text, authors, title, journal plus
        word counts), removing un-necessary information from the metadata csv.
    # Credits:
        Some aspects of code borrowed from:
        https://www.kaggle.com/ivanegapratama/covid-eda-initial-exploration-tool
    """
    data = pd.read_csv(BASEPATH + "/CORD-19-research-challenge/metadata.csv")
    json_files = glob.glob(BASEPATH + "/CORD-19-research-challenge/*/*/*.json", recursive=True)
    covid_data_dict = {'paper_id': [],
                       'abstract': [],
                       'body_text': [],
                       'authors': [],
                       'title': [],
                       'journal': []}
    # guard the progress modulus: len(json_files) // 10 is 0 for < 10 files,
    # which previously raised ZeroDivisionError
    report_every = max(len(json_files) // 10, 1)
    for idx, entry in enumerate(json_files):
        if idx % report_every == 0:
            print('Processing: {} of {}'.format(idx, len(json_files)))
        paper_id, abstract, full_text = retrieve_data_from_json(entry)
        meta = data.loc[data['sha'] == paper_id]
        if len(meta) == 0:
            # paper not present in the metadata csv
            continue
        covid_data_dict['paper_id'].append(paper_id)
        covid_data_dict['abstract'].append(abstract)
        covid_data_dict['body_text'].append(full_text)
        try:
            authors = meta['authors'].values[0].split(';')
        except AttributeError:
            # authors field is missing (NaN is a float with no .split);
            # the old bare except could reference a stale/unbound `authors`
            authors = []
        if len(authors) > 2:
            # keep only the first author; the old `authors[:1] + "..."` was
            # list + str and raised TypeError, so truncation never happened
            covid_data_dict['authors'].append(authors[0] + "...")
        else:
            covid_data_dict['authors'].append(". ".join(authors))
        covid_data_dict['title'].append(meta['title'].values[0])
        covid_data_dict['journal'].append(meta['journal'].values[0])
    covid_df = pd.DataFrame(covid_data_dict, columns=['paper_id', 'abstract', 'body_text', \
                                                      'authors', 'title', 'journal'])
    covid_df['abstract_word_count'] = covid_df['abstract'].apply(lambda x: len(x.strip().split()))
    covid_df['body_text_word_count'] = covid_df['body_text'].apply(lambda x: len(x.strip().split()))
    # Removing punctuation marks; the old class `[^a-zA-z0-9\s]` used the
    # buggy A-z range, which also kept [ \ ] ^ _ ` characters
    covid_df['body_text'] = covid_df['body_text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
    covid_df['abstract'] = covid_df['abstract'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
    # Convert to lower case
    covid_df['body_text'] = covid_df['body_text'].apply(lambda x: x.lower())
    covid_df['abstract'] = covid_df['abstract'].apply(lambda x: x.lower())
    covid_df.to_csv(BASEPATH + '/COVID_19_Lit.csv', encoding='utf-8', index=False)
    print(covid_df.head())
    print("Written dataframe to .csv file.")
def to_one_hot(data_point_index, vocab_size):
    """
    Build a one-hot vector of length ``vocab_size``.

    # Returns: a 1-D float array that is zero everywhere except at
        ``data_point_index``, where it is one.
    # Credits:
        Function taken from:
        https://gist.github.com/aneesh-joshi/c8a451502958fa367d84bf038081ee4b
    """
    one_hot = np.zeros(vocab_size)
    one_hot[data_point_index] = 1
    return one_hot
def load_data_for_training_w2v():
    """
    Loads the data for training and testing for the word2vec model.

    Reads COVID_19_Lit.csv, extracts body-text tokens, builds a list of
    English-word bigrams, one-hot encodes neighbouring bigram pairs and
    saves the train/test splits under arrays/*.npy.
    """
    data = pd.read_csv(BASEPATH + '/COVID_19_Lit.csv')
    # keep only the body_text column
    corpus = data.drop(["paper_id", "abstract", "abstract_word_count", "body_text_word_count", "authors", "title", "journal"], axis=1)
    print(corpus.head(1))
    words, n_gram = [], []
    print(len(corpus))
    start = time.time()
    for ix in range(0, len(corpus)):
        # [1:-1] strips the surrounding brackets left by str(list) upstream
        words.append(str(corpus.iloc[ix]['body_text'][1:-1]).split(" "))
    print('Word Length: ', len(words))
    for word in words:
        for i in range(len(word)-2+1):
            word1, word2 = word[i:i+2]
            if word1 != "" and word2 != "":
                # keep only pairs where both tokens are English dictionary words
                if engDict.check(word1) == True and engDict.check(word2) == True:
                    # NOTE(review): "".join concatenates the two words with no
                    # separator ("the", "cat" -> "thecat") -- confirm this is
                    # intended rather than " ".join
                    n_gram.append("".join(word[i:i+2]))
    end = time.time()
    print("Prepared n-grams in: {}s".format(end-start))
    print("N-gram length: ", len(n_gram))
    # cap the corpus to bound the one-hot matrix size below
    n_gram = n_gram[:100000]
    print("Reducing size to: ", len(n_gram))
    word2int, int2word = {}, {}
    print("N-gram length: ", len(n_gram))
    start = time.time()
    for i, word in enumerate(n_gram):
        # duplicate bigrams collapse: the last occurrence wins the index
        word2int[word] = i
        int2word[i] = word
    # consecutive (bigram, next-bigram) training pairs
    word_with_neighbor = list(map(list, zip(n_gram, n_gram[1:])))
    end = time.time()
    print("Computed neighbours in: {}s".format(end-start))
    X, y = [], []
    vocab_size = max(word2int.values()) + 1
    print("Vocab size: ", vocab_size)
    start = time.time()
    for idx, word_neigh in enumerate(word_with_neighbor):
        # NOTE(review): raises ZeroDivisionError if there are < 10 pairs
        if idx % (len(word_with_neighbor) // 10) == 0:
            print('Processing: {} of {}'.format(idx, len(word_with_neighbor)))
        X.append(to_one_hot(word2int[word_neigh[0]], vocab_size))
        y.append(to_one_hot(word2int[word_neigh[1]], vocab_size))
    # dense one-hot matrices: memory grows as n_pairs * vocab_size floats
    X = np.asarray(X)
    y = np.asarray(y)
    end = time.time()
    print("Prepared the data vectors: {}s".format(end-start))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)
    X_test = np.asarray(X_test)
    y_test = np.asarray(y_test)
    print("Shapes: \nX_train: {}\ny_train: {}\nX_test: {}\ny_test: {}".format(X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    np.save('arrays/X_train_w2v.npy', X_train)
    np.save('arrays/y_train_w2v.npy', y_train)
    np.save('arrays/X_test_w2v.npy', X_test)
    np.save('arrays/y_test_w2v.npy', y_test)
    print("Saved arrays!")
def read_arrays_and_return():
"""
Reads the prepared numpy arrays
# Returns: the read np arrays
"""
X_train = np.load('arrays/X_train_w2v.npy')
y_train = np.load('arrays/y_train_w2v.npy')
X_test = | np.load('arrays/X_test_w2v.npy') | numpy.load |
from os.path import abspath, dirname, join, isdir
import numpy as np
import datetime
from .. import skyvec2ins
from ..gui import get_aperture
TARGETS_DIR = abspath(join(dirname(__file__), 'targets'))
START_DATE = datetime.datetime(2018, 10, 1)
NPOINTS = 360
NROLLS = 20
MAXVROLL = 10.0
def _save_test_case(test_case_name, aperture,
                    ra, dec, pa1, pa2, pa3,
                    separation_as1, separation_as2, separation_as3):
    """Run skyvec2ins for one test case and write each output array to CSV.

    Parameters
    ----------
    test_case_name : str
        Name of the test case (subdirectory of TARGETS_DIR).
    aperture : jwxml.Aperture object
        Aperture as loaded from the instrument SIAF.
    ra : float
        Right ascension of science target in decimal degrees (0-360).
    dec : float
        Declination of science target in decimal degrees (-90, 90).
    pa1, pa2, pa3 : float
        Position angles of target companions in degrees east of north.
    separation_as1, separation_as2, separation_as3 : float
        Separations of target companions in arcseconds.
    """
    # one CSV per output array, in skyvec2ins output order
    column_names = [
        'x', 'observable', 'elongation_rad', 'roll_rad',
        'c1_x', 'c1_y', 'c2_x', 'c2_y', 'c3_x', 'c3_y',
        'n_x', 'n_y', 'e_x', 'e_y',
    ]
    results = skyvec2ins.skyvec2ins(
        ra=ra, dec=dec,
        pa1=pa1, separation_as1=separation_as1,
        pa2=pa2, separation_as2=separation_as2,
        pa3=pa3, separation_as3=separation_as3,
        aper=aperture,
        start_date=START_DATE,
        npoints=NPOINTS,
        nrolls=NROLLS,
        maxvroll=MAXVROLL,
    )
    target_dir = join(TARGETS_DIR, test_case_name)
    for column_name, result in zip(column_names, results):
        destination = join(target_dir, '{}.csv'.format(column_name))
        np.savetxt(destination, result, delimiter=',')
        print('Saved', destination)
def _generate_test_outputs():
    """Generate and save skyvec2ins outputs for every reference test case."""
    # (case name, (instrument, SIAF aperture), target keyword arguments)
    cases = (
        ('Fomalhaut', ('NIRCam', 'NRCA2_MASK210R'),
         dict(ra=344.41269, dec=-29.62224,
              pa1=325, pa2=0, pa3=0,
              separation_as1=10, separation_as2=0, separation_as3=0)),
        ('1RXSJ160929p1-210524', ('NIRCam', 'NRCB3_MASKSWB'),
         dict(ra=242.37628, dec=-21.08304,
              pa1=20, pa2=0, pa3=0,
              separation_as1=3, separation_as2=0, separation_as3=0)),
        ('HR8799', ('MIRI', 'MIRIM_MASK1065'),
         dict(ra=346.86965, dec=21.13425,
              pa1=45, separation_as1=1.7,
              pa2=325, separation_as2=1,
              pa3=190, separation_as3=0.65)),
        ('NGC6543', ('MIRI', 'MIRIM_MASKLYOT'),
         dict(ra=269.63926, dec=66.63320,
              pa1=0, separation_as1=0,
              pa2=0, separation_as2=0,
              pa3=0, separation_as3=0)),
    )
    for case_name, (instrument, aper_name), target_kwargs in cases:
        _save_test_case(case_name,
                        get_aperture(instrument, aper_name),
                        **target_kwargs)
def _load_test_case(test_case_name):
    """Load the saved CSV outputs for one test case.

    Parameters
    ----------
    test_case_name: str
        Name of the test case.

    Returns
    -------
    Generator over the loaded output arrays, in skyvec2ins output order.
    """
    case_path = join(TARGETS_DIR, test_case_name)
    assert isdir(case_path)
    column_names = [
        'x', 'observable', 'elongation_rad', 'roll_rad',
        'c1_x', 'c1_y', 'c2_x', 'c2_y', 'c3_x', 'c3_y',
        'n_x', 'n_y', 'e_x', 'e_y',
    ]
    return (
        np.genfromtxt(join(case_path, '{}.csv'.format(column)), delimiter=',')
        for column in column_names
    )
def _compare_outputs(reference, computed):
"""Compare computed outputs to the reference outputs (those on file).
Parameters
----------
reference : tuple
Reference outputs for test case.
computed : tuple
Computed outputs for test case.
"""
(
x,
observable,
elongation_rad,
roll_rad,
c1_x, c1_y,
c2_x, c2_y,
c3_x, c3_y,
n_x, n_y,
e_x, e_y
) = reference
(
t_x,
t_observable,
t_elongation_rad,
t_roll_rad,
t_c1_x, t_c1_y,
t_c2_x, t_c2_y,
t_c3_x, t_c3_y,
t_n_x, t_n_y,
t_e_x, t_e_y
) = computed
assert np.allclose(x, t_x)
assert np.allclose(elongation_rad, t_elongation_rad)
assert np.allclose(roll_rad, t_roll_rad, atol=2e-6)
assert not np.any((observable == 1) ^ (t_observable == 1))
nircam_pixelscale = 0.0311 # for short-wavelen channels, SIAF PRDDEVSOC-D-012, 2016 April
siaf_transform_epsilon = nircam_pixelscale / 100
# rationale: comparison of the SIAF transforms shows they should be
# mathematically correct in both implementations, but numerical errors are
# somehow being compounded to result in errors that are nevertheless small
# relative to the size of a pixel (<< 0.01 px). We set the tolerance at
# 1/100 of a NIRCam pixel.
# n.b. the residuals are larger in Y for this test case
# see https://github.com/mperrin/jwxml/issues/4
assert np.allclose(c1_x, t_c1_x, atol=siaf_transform_epsilon)
assert | np.allclose(c1_y, t_c1_y, atol=siaf_transform_epsilon) | numpy.allclose |
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.fftpack import fft
from combined_functions import check_ft_grid
from scipy.constants import pi, c, hbar
from numpy.fft import fftshift
from scipy.io import loadmat
from time import time
import sys
import matplotlib.pyplot as plt
from scipy.integrate import simps
def fv_creator(fp, df, F, int_fwm):
"""
Cretes frequency grid such that the estimated MI-FWM bands
will be on the grid and extends this such that to avoid
fft boundary problems.
Inputs::
lamp: wavelength of the pump (float)
lamda_c: wavelength of the zero dispersion wavelength(ZDW) (float)
int_fwm: class that holds nt (number of points in each band)
betas: Taylor coeffiencts of beta around the ZDW (Array)
M : The M coefficient (or 1/A_eff) (float)
P_p: pump power
Df_band: band frequency bandwidth in Thz, (float)
Output::
fv: Frequency vector of bands (Array of shape [nt])
"""
f_centrals = [fp + i * F for i in range(-1, 2)]
fv1 = np.linspace(f_centrals[0], f_centrals[1],
int_fwm.nt//4 - 1, endpoint=False)
df = fv1[1] - fv1[0]
fv2 = np.linspace(f_centrals[1], f_centrals[2], int_fwm.nt//4)
try:
assert df == fv2[1] - fv2[0]
except AssertionError:
print(df, fv2[1] - fv2[0])
fv0, fv3 = np.zeros(int_fwm.nt//4 + 1), | np.zeros(int_fwm.nt//4) | numpy.zeros |
from time import time
start = time()
import numpy as np
import pandas as pd
import glob
import PIL
import matplotlib.pyplot as plt
import cv2
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
from scipy.ndimage import median_filter, gaussian_filter
import sys
def recreate_image(codebook, labels, w, h):
    """Recreate the (compressed) image from the code book & labels.

    Parameters: ``codebook`` is an (n_clusters, d) array of cluster colors;
    ``labels`` is a flat array of length w*h giving each pixel's cluster in
    row-major order; ``w``/``h`` are the output dimensions.

    Returns a (w, h, d) float array.
    """
    d = codebook.shape[1]
    # fancy-index all pixels at once instead of a Python double loop;
    # reshape preserves the original row-major (i outer, j inner) fill order
    return codebook[np.asarray(labels)].reshape(w, h, d).astype(float, copy=False)
path_name = 'test5.jpg'
img = cv2.imread(path_name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
w, h, d = original_shape = tuple(img.shape)
#print("Loading image ...")
# Median Filtering
img = median_filter(img, 1)
# Canny Edge Detection
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 1)
edges = cv2.Canny(blurred, 20, 200)
#plt.imshow(edges)
# Color Quantization using KMeans Clustering
imag = np.array(img, dtype=np.float64) / 255
img_array = np.reshape(imag, (w*h, d))
img_array_sample = shuffle(img_array, random_state=0)[:10000]
kmeans = KMeans(n_clusters=50, random_state=42).fit(img_array_sample)
labels = kmeans.predict(img_array)
new_image = recreate_image(kmeans.cluster_centers_, labels, w, h)
#plt.imshow(new_image)
## Applying dilation thrice
kernel = np.ones((3,3),np.uint8)
dl_img = cv2.dilate(edges, kernel, iterations=1)
kernel = | np.ones((2,2),np.uint8) | numpy.ones |
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
    """Evaluate element spectra over the energy grid.

    Returns an array of shape (numbener, numbelem): ``flux`` scaled into each
    energy bin according to the spectral model.

    NOTE(review): the branches below test ``gmod.spectype`` although ``gmod``
    is never defined inside this function (the ``spectype`` argument is
    unused beyond its default) -- looks like a refactor leftover; confirm.
    """
    if gdat.numbener == 1:
        # single energy bin: the spectrum is just the flux
        spec = flux[None, :]
    else:
        if plot:
            meanener = gdat.meanpara.enerplot
        else:
            meanener = gdat.meanpara.ener
        if gmod.spectype == 'gaus':
            # NOTE(review): `edis` is undefined in this branch (only the
            # 'edis' branch computes it via edisintp) -- confirm.
            spec = 1. / edis[None, :] / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)
        if gmod.spectype == 'voig':
            # NOTE(review): bare `pi` and `real` are not imported in this
            # module (numpy is imported as np) -- likely np.pi / np.real.
            args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
            spec = 1. / sigm[None, :] / np.sqrt(2. * pi) * flux[None, :] * real(scipy.special.wofz(args))
        if gmod.spectype == 'edis':
            # Gaussian line with energy dispersion interpolated at elin
            edis = edisintp(elin)[None, :]
            spec = 1. / edis / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if gmod.spectype == 'pvoi':
            # NOTE(review): identical body to 'edis' but `edis` is only
            # bound if the 'edis' branch ran -- confirm.
            spec = 1. / edis / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if gmod.spectype == 'lore':
            # NOTE(review): same issue as 'pvoi' above.
            spec = 1. / edis / np.sqrt(2. * pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if gmod.spectype == 'powr':
            # power law about the pivot energy
            spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
        if gmod.spectype == 'colr':
            # independent color (spectral index) per energy bin
            if plot:
                spec = np.zeros((gdat.numbenerplot, flux.size))
            else:
                spec = np.empty((gdat.numbener, flux.size))
                for i in gdat.indxener:
                    if i < gdat.indxenerpivt:
                        spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
                    elif i == gdat.indxenerpivt:
                        spec[i, :] = flux
                    else:
                        spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
        if gmod.spectype == 'curv':
            # log-parabola (curved power law)
            spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
        if gmod.spectype == 'expc':
            # power law with exponential cutoff
            spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
    return spec
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
    """Return the surface brightness contributed by one point source:
    the PSF profile evaluated at each pixel, weighted by the spectrum."""
    # angular distance from the source to every pixel of interest
    angldist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
    if gdat.kernevaltype == 'ulip':
        # evaluate the interpolated PSF at those distances
        psfneval = psfnintp(angldist)
    if gdat.kernevaltype == 'bspx':
        # B-spline kernel evaluation is not implemented
        pass
    # weight the PSF profile by the point-source spectrum
    return spec[:, None, None] * psfneval
def retr_psfnwdth(gdat, psfn, frac):
    '''
    Return the angular width at which the PSF falls to ``frac`` of its peak,
    per energy bin and event type.
    '''
    wdth = np.zeros((gdat.numbener, gdat.numbevtt))
    for i in gdat.indxener:
        for m in gdat.indxevtt:
            prof = psfn[i, :, m]
            order = np.argsort(prof)
            # target level, clipped from below at the profile minimum
            level = max(frac * np.amax(prof), np.amin(prof))
            sortprof = prof[order]
            if np.amin(sortprof) <= level <= np.amax(sortprof):
                # invert the (sorted) profile to find the matching angle
                interp = sp.interpolate.interp1d(sortprof, gdat.binspara.angl[order],
                                                 fill_value='extrapolate')
                wdth[i, m] = interp(level)
            else:
                wdth[i, m] = 0.
    return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
    """Draw a random sky position from a pixel-probability template.

    A pixel index is drawn with probabilities ``probtmpl``; the returned
    position is that pixel's grid coordinates plus a random jitter.
    """
    indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
    # NOTE(review): `randn` is not defined in this module's visible imports
    # (numpy is imported as np), and np.random.randn(gdat.sizepixl) would
    # reject a float size -- this looks broken; confirm the intended jitter,
    # e.g. np.random.randn() * gdat.sizepixl.
    lgal = gdat.lgalgrid[indxpixldraw] + randn(gdat.sizepixl)
    bgal = gdat.bgalgrid[indxpixldraw] + randn(gdat.sizepixl)
    return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
    """Return the Poisson log-probability of ``data`` counts given the
    model expectation ``modl``."""
    return data * np.log(modl) - modl - sp.special.gammaln(data + 1)
### probability density functions
def pdfn_self(xdat, minm, maxm):
    """Uniform pdf on [minm, maxm]; constant, independent of ``xdat``."""
    width = maxm - minm
    return 1. / width
def pdfn_expo(xdat, maxm, scal):
    """Truncated-exponential pdf on [0, maxm] with scale ``scal``.

    Returns the scalar 0. if any element of the array ``xdat`` exceeds
    ``maxm``; otherwise the normalized density at each element.
    """
    if (xdat > maxm).any():
        return 0.
    norm = scal * (1. - np.exp(-maxm / scal))
    return np.exp(-xdat / scal) / norm
def pdfn_dexp(xdat, maxm, scal):
    """Double (two-sided) truncated-exponential pdf, symmetric about zero:
    half the one-sided density evaluated at |xdat|."""
    onesided = pdfn_expo(np.fabs(xdat), maxm, scal)
    return 0.5 * onesided
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
    """Broken (double) power-law pdf on [minm, maxm].

    The slope is ``sloplowr`` below the break ``brek`` and ``slopuppr``
    above it; ``faca`` normalizes the piecewise density to unit integral
    (the two pieces are continuous at the break).  A scalar ``xdat`` is
    promoted to a length-1 array; the return value is always an array.

    Fix: removed the dead local ``facb``, which was computed but never used.
    """
    if np.isscalar(xdat):
        xdat = np.array([xdat])
    # overall normalization over both power-law segments
    faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
                 (1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
    pdfn = np.empty_like(xdat)
    indxlowr = np.where(xdat <= brek)[0]
    indxuppr = np.where(xdat > brek)[0]
    if indxlowr.size > 0:
        pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
    if indxuppr.size > 0:
        pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
    return pdfn
def pdfn_powr(xdat, minm, maxm, slop):
    """Power-law pdf proportional to x**(-slop), normalized on [minm, maxm]."""
    normalization = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
    return normalization * xdat**(-slop)
def pdfn_logt(xdat, minm, maxm):
    """Log-uniform pdf on [minm, maxm]: proportional to 1/x."""
    lognorm = np.log(maxm) - np.log(minm)
    return 1. / (lognorm * xdat)
def pdfn_igam(xdat, slop, cutf):
    """Inverse-gamma pdf with shape ``slop - 1`` and scale (cutoff) ``cutf``."""
    shape = slop - 1.
    return sp.stats.invgamma.pdf(xdat, shape, scale=cutf)
def pdfn_lnor(xdat, mean, stdv):
    """Log-normal density in log-space: a Gaussian evaluated at log(xdat)
    with mean log(mean) (no 1/x Jacobian is applied here)."""
    return pdfn_gaus(np.log(xdat), np.log(mean), stdv)
def pdfn_gaus(xdat, mean, stdv):
    """Normal (Gaussian) pdf with mean ``mean`` and standard deviation ``stdv``.

    Fix: use ``np.pi`` -- the bare name ``pi`` is not imported anywhere in
    this module and raised NameError at call time.
    """
    return 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
def pdfn_lgau(xdat, mean, stdv):
    """Log-Gaussian density in log-space (identical in form to pdfn_lnor)."""
    return pdfn_gaus(np.log(xdat), np.log(mean), stdv)
def pdfn_atan(para, minmpara, maxmpara):
    """Pdf whose cdf is arctan (Cauchy-like), normalized between
    ``minmpara`` and ``maxmpara``."""
    atannorm = np.arctan(maxmpara) - np.arctan(minmpara)
    return 1. / ((para**2 + 1.) * atannorm)
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
    """Map a base parameter value to its unit-interval (CDF) value under the
    prior scaling declared for that parameter in the model ``strgmodl``.

    NOTE(review): several branches below *overwrite* gmod attributes with
    the indexed element (e.g. gmod.listmaxmparagenrscalbase = ...[index]),
    destructively mutating the shared model object on every call -- confirm
    this is intended and not a bug.
    """
    gmod = getattr(gdat, strgmodl)
    # prior scaling type of this base parameter ('self', 'logt', 'atan', ...)
    scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
    if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
        listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
        factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
    if scalparagenrbase == 'self':
        paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
    elif scalparagenrbase == 'logt':
        paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
    elif scalparagenrbase == 'atan':
        gmod.listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
        paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, gmod.listmaxmparagenrscalbase)
    elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
        gmod.listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
        gmod.liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
        if scalparagenrbase == 'eerr':
            gmod.cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
            gmod.listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
            paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase, \
                                             gmod.cdfnlistminmparagenrscalbaseunit, gmod.listparagenrscalbaseunitdiff)
        else:
            paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase)
    elif scalparagenrbase == 'pois':
        # Poisson-scaled parameters pass through unchanged
        paragenrscalbaseunit = paragenrscalbase
    if gdat.booldiagmode:
        if paragenrscalbaseunit == 0:
            print('Warning. CDF is zero.')
    return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
    """Transform a full unit-interval parameter vector into physical scale.

    Base parameters are mapped per scaling type via icdf_paragenrscalbase;
    element (transdimensional) parameters, when ``indxparagenrfullelem`` is
    given, are mapped via icdf_trap.  Raises on any non-finite result.
    """
    gmod = getattr(gdat, strgmodl)
    # tobechanged
    # temp -- change zeros to empty
    paragenrscalfull = np.zeros_like(paragenrunitfull)
    # base parameters, grouped by prior scaling type
    for scaltype in gdat.listscaltype:
        listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
        if len(listindxparagenrbasescal) == 0:
            continue
        paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
    if not np.isfinite(paragenrscalfull).all():
        raise Exception('')
    if indxparagenrfullelem is not None:
        # element parameters, per population and per parameter kind
        for l in gmod.indxpopl:
            for g in gmod.indxparagenrelemsing[l]:
                indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
                if indxparagenrfulltemp.size == 0:
                    continue
                paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
                                                                   gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
                if gdat.booldiagmode:
                    if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
                        raise Exception('')
    if not np.isfinite(paragenrscalfull).all():
        raise Exception('')
    return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
    """Inverse-CDF map for base parameters: unit-interval values back to
    physical scale, per prior scaling type.

    Inverse of cdfn_paragenrscalbase; delegates to the tdpy icdf_* helpers.
    """
    gmod = getattr(gdat, strgmodl)
    if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
        minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
        factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
    if scaltype == 'self':
        paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
    elif scaltype == 'logt':
        paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
    elif scaltype == 'atan':
        listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
        paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
    elif scaltype == 'gaus' or scaltype == 'eerr':
        listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
        liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
        if scaltype == 'eerr':
            cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
            listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
            paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
        else:
            paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
    elif scaltype == 'pois':
        # Poisson-scaled parameters pass through unchanged
        paragenrscalbase = paragenrunitbase
    if gdat.booldiagmode:
        if not np.isfinite(paragenrscalbase).all():
            print('scaltype')
            print(scaltype)
            print('paragenrscalbase')
            print(paragenrscalbase)
            print('type(paragenrscalbase)')
            print(type(paragenrscalbase))
            print('paragenrscalbase.dtype')
            print(paragenrscalbase.dtype)
            raise Exception('')
    return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
    """Inverse-CDF map for one element (transdimensional) parameter kind.

    ``cdfn`` holds unit-interval values; hyperparameters (slopes, break,
    mean/stdv, ...) are read from ``paragenrscalfull`` for population ``l``.
    """
    gmod = getattr(gdat, strgmodl)
    if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
        minm = getattr(gmod.minmpara, nameparagenrelem)
    if scalcomp != 'self':
        maxm = getattr(gmod.maxmpara, nameparagenrelem)
    if scalcomp == 'powr':
        slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
        if gdat.booldiagmode:
            if not np.isfinite(slop):
                raise Exception('')
            if maxm < minm:
                raise Exception('')
        icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
    if scalcomp == 'dpowslopbrek':
        distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
        sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
        slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
        icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
    if scalcomp == 'expo':
        sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
        icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
    if scalcomp == 'self':
        fact = getattr(gmod.factpara, nameparagenrelem)
        icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
    if scalcomp == 'logt':
        # NOTE(review): `fact` is only bound in the 'self' branch above, so
        # this raises NameError for scalcomp == 'logt' -- confirm the
        # intended source of `fact` here.
        icdf = tdpy.icdf_logt(cdfn, minm, fact)
    if scalcomp == 'dexp':
        scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
        icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
    if scalcomp == 'lnormeanstdv':
        distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
        diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
        icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
    if scalcomp == 'igam':
        slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
        cutf = getattr(gdat, 'cutf' + nameparagenrelem)
        icdf = tdpy.icdf_igam(cdfn, slop, cutf)
    if scalcomp == 'gaus':
        distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
        diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
        icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
    if gdat.booldiagmode:
        if not np.isfinite(icdf).all():
            print('icdf')
            print(icdf)
            raise Exception('')
    return icdf
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
    """Forward-CDF map for the element parameters of one population:
    physical values ``icdf`` back to unit-interval values.

    NOTE(review): the first assignment below *overwrites*
    gmod.listscalparagenrelem with its indexed element, destructively
    mutating the shared model object; a second call with the same gmod
    would index into the wrong structure -- confirm.
    """
    gmod = getattr(gdat, strgmodl)
    gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
    gmod.listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
    cdfn = np.empty_like(icdf)
    for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
        if gmod.listscalparagenrelem[k] == 'self' or gmod.listscalparagenrelem[k] == 'dexp' or gmod.listscalparagenrelem[k] == 'expo' \
                        or gmod.listscalparagenrelem[k] == 'powr' or gmod.listscalparagenrelem[k] == 'dpowslopbrek':
            minm = getattr(gdat.fitt.minm, nameparagenrelem)
            if gmod.listscalparagenrelem[k] == 'powr':
                maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
                slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
                cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
            elif gmod.listscalparagenrelem[k] == 'dpowslopbrek':
                maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
                brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
                sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
                slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
                cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
            else:
                fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
                cdfn[k] = cdfn_self(icdf[k], minm, fact)
        if gmod.listscalparagenrelem[k] == 'lnormeanstdv':
            distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
            diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
            # NOTE(review): passes `slop` (possibly unbound / from a previous
            # branch) instead of `diststdv` -- looks like a bug; confirm.
            cdfn[k] = cdfn_lnor(icdf[k], distmean, slop)
        if gmod.listscalparagenrelem[k] == 'igam':
            slop = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
            cutf = getattr(gdat, 'cutf' + nameparagenrelem)
            cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
        if gmod.listscalparagenrelem[k] == 'gaus':
            distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
            diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
            cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
    return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
    """Accept the proposed sampler state: copy the 'next' state (priors,
    likelihood, posterior, and the modified parameter entries) into 'this'.
    """
    if gdat.typeverb > 1:
        print('updt_stat()')
    # update the sample and the unit sample vectors
    gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
    gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
    gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
    # only the entries touched by the proposal (indxsampmodi) are copied
    gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
    gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
    if gdatmodi.this.indxproptype > 0:
        # transdimensional proposal: also refresh the element bookkeeping
        gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
        gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
    """Initialize element parameters of the fitting model from a reference catalog.

    For each population and each generative element parameter, transform the
    reference values (``getattr(gdat, namerefr + name)``) into unit (CDF)
    space and write them into the current unit-parameter vector. Falls back
    to uniform random draws when the reference values are missing or the
    transform fails.

    Fixes relative to the original:
    - ``gmod`` was never defined (NameError); it is now bound to the fitting
      model, consistent with the ``gdat.fitt`` lookups throughout the body.
    - the 'expo' branch read the nonexistent ``gdat.fitt.maxm`` and the
      undefined name ``icdf``; it now uses ``gdat.fitt.maxmpara`` and the
      reference values ``comp``.
    - the 'dpowslopbrek' branch called ``cdfn_powr`` with broken-power-law
      arguments; it now calls ``cdfn_dpow`` (cf. the analogous icdf code).
    - the bare ``except:`` no longer swallows KeyboardInterrupt/SystemExit.
    """
    gmod = gdat.fitt
    for l in gmod.indxpopl:
        for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
            minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
            maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
            try:
                # reference values of this parameter for population l
                comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
                if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
                    fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
                    if gmod.listscalparagenrelem[l][g] == 'self':
                        compunit = cdfn_self(comp, minm, fact)
                    if gmod.listscalparagenrelem[l][g] == 'logt':
                        compunit = cdfn_logt(comp, minm, fact)
                if gmod.listscalparagenrelem[l][g] == 'expo':
                    scal = getattr(gdat.fitt, 'gangdistsexp')
                    maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
                    compunit = cdfn_expo(comp, maxm, scal)
                if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
                    slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
                    if gmod.listscalparagenrelem[l][g] == 'powr':
                        compunit = cdfn_powr(comp, minm, maxm, slop)
                    if gmod.listscalparagenrelem[l][g] == 'igam':
                        cutf = getattr(gdat, 'cutf' + nameparagenrelem)
                        compunit = cdfn_igam(comp, slop, cutf)
                if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
                    brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
                    sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
                    slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
                    compunit = cdfn_dpow(comp, minm, maxm, brek, sloplowr, slopuppr)
                if gmod.listscalparagenrelem[l][g] == 'gaus':
                    distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
                    diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
                    compunit = cdfn_gaus(comp, distmean, diststdv)
            except Exception:
                if gdat.typeverb > 0:
                    print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
                compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
            gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
    """Return the per-element pixel index lists and their concatenated,
    unique union for population ``l``.

    For locally-evaluated element types ('locl') each element gets the
    precomputed proximity pixel list matching its amplitude bin; otherwise
    every element covers the full pixel grid.
    """
    gmod = getattr(gdat, strgmodl)
    lgal = dictelem[l]['lgal']
    bgal = dictelem[l]['bgal']
    varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
    if gmod.typeelemspateval[l] != 'locl':
        # global evaluation: every element sees the whole map
        return gdat.indxpixl, gdat.indxpixl
    listindxpixlelem = []
    for k in range(lgal.size):
        indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
        # amplitude bin selects which proximity list to use
        indxprox = np.digitize(varbampl[k], gdat.binspara.prox)
        if indxprox > 0:
            indxprox -= 1
        if indxprox == gdat.binspara.prox.size - 1:
            print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')
            indxprox -= 1
        indxpixlelem = gdat.indxpixlprox[indxprox][indxpixlpnts]
        # an int sentinel in the proximity table means "use all pixels"
        if isinstance(indxpixlelem, int):
            indxpixlelem = gdat.indxpixl
        listindxpixlelem.append(indxpixlelem)
    listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
    return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
    """Angular distance between a sky position and the given grid pixels.

    For HEALPix maps the distance is computed on the sphere via the dot
    product of unit vectors (optionally returning the cosine itself); for
    Cartesian maps the flat-sky Euclidean distance is used.
    """
    if gdat.typepixl == 'heal':
        xdat, ydat, zaxi = retr_unit(lgal, bgal)
        anglcosi = (gdat.xdatgrid[indxpixlelem] * xdat
                    + gdat.ydatgrid[indxpixlelem] * ydat
                    + gdat.zaxigrid[indxpixlelem] * zaxi)
        if retranglcosi:
            return anglcosi
        return np.arccos(anglcosi)
    # flat-sky approximation for Cartesian pixelization
    difflgal = lgal - gdat.lgalgrid[indxpixlelem]
    diffbgal = bgal - gdat.bgalgrid[indxpixlelem]
    return np.sqrt(difflgal**2 + diffbgal**2)
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
    """Map sky coordinates (scalar or array) to flat pixel indices.

    HEALPix maps go through ``hp.ang2pix`` plus the ROI conversion table;
    Cartesian maps floor the coordinates onto the grid and clamp both axes
    to the valid range [0, numbsidecart - 1].
    """
    if gdat.typepixl == 'heal':
        indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
        if gdat.booldiagmode:
            if (indxpixl == -1).any():
                raise Exception('pixlcnvt went negative!')
    if gdat.typepixl == 'cart':
        indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
        indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
        # clamp out-of-range coordinates onto the grid edges (handles both
        # scalar and array inputs, replacing the original branchy clamping)
        indxlgcr = np.clip(indxlgcr, 0, gdat.numbsidecart - 1)
        indxbgcr = np.clip(indxbgcr, 0, gdat.numbsidecart - 1)
        # row-major flattening of the (lgal, bgal) grid
        indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
    return indxpixl
## obtain count maps
def retr_cntp(gdat, sbrt):
    """Convert surface brightness to expected counts.

    Multiplies by exposure and pixel solid angle; when the energy axis is
    differential, also multiplies by the energy bin widths (broadcast over
    the leading energy axis).
    """
    cntp = sbrt * gdat.expo * gdat.apix
    if not gdat.enerdiff:
        return cntp
    return cntp * gdat.deltener[:, None, None]
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
    """Build the output path of a plot.

    True-model or stateless plots go under the initial-plots directory;
    posterior ('pdfn') and maximum-likelihood ('mlik') summaries under
    'finl'; per-sweep frame plots ('this') under 'fram' with the sweep
    counter embedded in the file name.
    """
    if strgmodl == 'true' or strgstat == '':
        path = '%s%s%s.pdf' % (gdat.pathinit, nameinte, strgplot)
    elif strgstat in ('pdfn', 'mlik'):
        path = '%s%s/finl/%s%s%s.pdf' % (gdat.pathplotrtag, strgpdfn, nameinte, strgstat, strgplot)
    elif strgstat == 'this':
        path = '%s%s/fram/%s%s%s_swep%09d.pdf' % (gdat.pathplotrtag, strgpdfn, nameinte, strgstat, strgplot, gdatmodi.cntrswep)
    return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
    """Marker size for an element, scaled by the square root of its amplitude.

    Linearly interpolates between the configured minimum and maximum marker
    sizes as sqrt(amplitude) runs from sqrt(minm) to sqrt(maxm).
    """
    gmod = getattr(gdat, strgmodl)  # kept for parity with the original lookup
    minm = getattr(gdat.minmpara, nameparagenrelemampl)
    maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
    frac = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm))
    return frac * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
## experiment specific
def retr_psfphubb(gmod):
    """Set the expected PSF parameter vector for the HST analysis.

    NOTE(review): reads ``gdat.anglfact`` but ``gdat`` is not a parameter and
    is not defined in this scope — this raises NameError unless ``gdat`` is
    supplied by an enclosing/global scope; confirm the intended signature
    (likely ``(gdat, gmod)``).
    """
    # temp
    gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gmod):
    """Set the expected PSF parameter vector for the Chandra analysis.

    The vector appears to hold per-energy (width, slope) pairs — the widths
    are divided by ``gdat.anglfact``, i.e. stored in radians — TODO confirm
    against the PSF evaluation code.

    NOTE(review): reads ``gdat`` (anglfact, numbenerfull) but only receives
    ``gmod`` — NameError unless ``gdat`` comes from an enclosing/global
    scope; confirm the intended signature.
    """
    # temp
    #gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
    if gdat.numbenerfull == 5:
        gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
    if gdat.numbenerfull == 2:
        gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
    # alternative parameterizations kept for reference:
    #gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
    #gmod.psfpexpr = np.array([0.25 / gdat.anglfact,
    #                            0.30 / gdat.anglfacti\
    #                            0.40 / gdat.anglfacti\
    #                            0.60 / gdat.anglfacti\
    #                            0.70 / gdat.anglfacti
    #gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
    #gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
    #                            0.30 / gdat.anglfact, 1.0e-1, 2.0, \
    #                            0.40 / gdat.anglfact, 1.0e-1, 2.0, \
    #                            0.60 / gdat.anglfact, 1.0e-1, 2.0, \
    #                            0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
    """Set the expected PSF parameter vector for the SDYN experiment
    (a single fixed width)."""
    psfpexpr = np.array([0.05])
    gmod.psfpexpr = psfpexpr
def retr_psfpferm(gmod):
    """Load the Fermi-LAT PSF parameters from the IRF FITS files and store
    the flattened expected parameter vector in ``gmod.psfpexpr``.

    The five per-energy, per-event-type parameters are the double-King
    profile coefficients (score, gcore, stail, gtail, ntail); ntail is
    converted to a core fraction before storage.

    NOTE(review): reads ``gdat`` (anlytype, pathdata, numbener, ...) but only
    receives ``gmod`` — NameError unless ``gdat`` is in an enclosing/global
    scope; confirm the intended signature. Also relies on module-level
    ``astropy`` and ``sp`` (scipy) imports.
    """
    if gdat.anlytype.startswith('rec8'):
        path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
    else:
        path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
    irfn = astropy.io.fits.getdata(path, 1)
    # IRF energy bin edges and geometric-mean bin centers
    minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
    maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
    enerirfn = np.sqrt(minmener * maxmener)
    numbpsfpscal = 3
    numbpsfpform = 5
    fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
    fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
    strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
    for m in gdat.indxevtt:
        if gdat.anlytype.startswith('rec8'):
            # Pass 8: one (RPSF, PSF_SCALING) extension pair per event type
            irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
            fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
        else:
            # Pass 7: separate front/back files; other event types are skipped
            if m == 1:
                path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
            elif m == 0:
                path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
            else:
                continue
            irfn = astropy.io.fits.getdata(path, 1)
            fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
        # interpolate each PSF form parameter onto the analysis energy grid
        for k in range(numbpsfpform):
            fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
    # convert N_tail to f_core
    for m in gdat.indxevtt:
        for i in gdat.indxener:
            fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
    # calculate the scale factor
    gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
    # store the fermi PSF parameters
    gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
    for m in gdat.indxevtt:
        for k in range(numbpsfpform):
            # flatten as (event type, energy, form parameter)
            indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
            gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
def retr_refrchaninit(gdat):
    """Initialize reference-catalog metadata for the Chandra analysis
    (Xue+2011 X-ray sources and Wolf+2008 optical counterparts).

    NOTE(review): ``gmod`` is referenced below but never defined in this
    scope — NameError at runtime; confirm the intended object (likely the
    fitting model).
    """
    gdat.indxrefr = np.arange(gdat.numbrefr)
    gdat.dictrefr = []
    for q in gdat.indxrefr:
        gdat.dictrefr.append(dict())
    # signature parameter of each reference catalog: flux (Xue), magnitude (Wolf)
    gdat.refr.namepara.elemsign = ['flux', 'magt']
    gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
    gdat.listnamerefr += ['xu11', 'wo08']
    setattr(gdat, 'plotminmotyp', 0.)
    # NOTE(review): 'plottmaxm' looks like a typo for 'plotmaxm' (cf. the loop below)
    setattr(gdat, 'plottmaxmotyp', 1.)
    setattr(gmod.lablrootpara, 'otyp', 'O')
    setattr(gdat, 'scalotypplot', 'self')
    setattr(gmod.lablrootpara, 'otypxu11', 'O')
    for name in gdat.listnamerefr:
        setattr(gdat, 'plotminmotyp' + name, 0.)
        setattr(gdat, 'plotmaxmotyp' + name, 1.)
    if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
        with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
            for k, line in enumerate(thisfile):
                # skip the 18-line header
                if k < 18:
                    continue
                # NOTE(review): both grab character 2 of the line and are then
                # unused — likely intended to parse RA/Dec columns; verify
                rasccand =line[2]
                declcand =line[2]
    # parameters carried by each reference catalog
    gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
    gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
    """Finalize the Chandra reference catalogs: align the reference elements
    with the analysis ROI, read the Xue+2011 and Wolf+2008 FITS catalogs,
    and populate positions, spectra, fluxes and auxiliary features.

    Relies on module-level ``pf`` (pyfits/astropy alias), ``pi`` and
    ``astropy`` imports — TODO confirm at the file header.
    """
    # determine the pixel offset of this ROI tile within the full mosaic;
    # unsupported geometry combinations abort below
    booltemp = False
    if gdat.anlytype.startswith('extr'):
        if gdat.numbsidecart == 300:
            gdat.numbpixllgalshft[0] = 1490
            gdat.numbpixlbgalshft[0] = 1430
        else:
            booltemp = True
    elif gdat.anlytype.startswith('home'):
        gdat.numbpixllgalshft[0] = 0
        gdat.numbpixlbgalshft[0] = 0
        if gdat.numbsidecart == 600:
            pass
        elif gdat.numbsidecart == 100:
            # tile index and mosaic size are encoded in the analysis-type string
            indxtile = int(gdat.anlytype[-4:])
            numbsidecntr = int(gdat.anlytype[8:12])
            # NOTE(review): true division yields a float under Python 3, so the
            # tile indices and pixel shifts below become floats — likely
            # intended as integer division (//); confirm
            numbtileside = numbsidecntr / gdat.numbsidecart
            indxtilexaxi = indxtile // numbtileside
            indxtileyaxi = indxtile % numbtileside
            gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
            gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
        elif gdat.numbsidecart == 300:
            gdat.numbpixllgalshft[0] += 150
            gdat.numbpixlbgalshft[0] += 150
        else:
            booltemp = True
    else:
        booltemp = True
    if booltemp:
        raise Exception('Reference elements cannot be aligned with the spatial axes!')
    ## WCS object for rotating reference elements into the ROI
    if gdat.numbener == 2:
        gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
    else:
        gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
    # Xue et al. (2011)
    #with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
    pathfile = gdat.pathinpt + 'Xue2011.fits'
    hdun = pf.open(pathfile)
    hdun.info()
    # catalog positions in radians
    lgalchan = hdun[1].data['_Glon'] / 180. * pi
    bgalchan = hdun[1].data['_Glat'] / 180. * pi
    fluxchansoft = hdun[1].data['SFlux']
    fluxchanhard = hdun[1].data['HFlux']
    objttypechan = hdun[1].data['Otype']
    gdat.refrlumi[0][0] = hdun[1].data['Lx']
    # position
    gdat.refr.dictelem[0]['lgal'] = lgalchan
    gdat.refr.dictelem[0]['bgal'] = bgalchan
    # spectra
    # leading axis of size 3: central value and the two error-budget copies
    gdat.refrspec = [[np.zeros((3, gdat.numbener, lgalchan.size))]]
    if gdat.numbener == 2:
        gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
        gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
    else:
        gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
    gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
    gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
    # fluxes
    gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
    # spectral indices
    if gdat.numbener > 1:
        gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
    ## object type
    # encode the class strings as fixed plotting values; unmatched rows stay -1
    objttypechantemp = np.zeros(lgalchan.size) - 1.
    indx = np.where(objttypechan == 'AGN')[0]
    objttypechantemp[indx] = 0.165
    indx = np.where(objttypechan == 'Galaxy')[0]
    objttypechantemp[indx] = 0.495
    indx = np.where(objttypechan == 'Star')[0]
    objttypechantemp[indx] = 0.835
    gdat.refrotyp[0][0] = objttypechantemp
    # Wolf et al. (2008)
    path = gdat.pathdata + 'inpt/Wolf2008.fits'
    data = astropy.io.fits.getdata(path)
    gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
    # wrap longitudes into (-pi, pi]
    gdat.refrlgal[1] = ((gdat.refrlgal[1] - pi) % (2. * pi)) - pi
    gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
    gdat.refrmagt[1][0] = data['Rmag']
    gdat.refrreds[1][0] = data['MCz']
    #listname = []
    #for k in range(data['MCclass'].size):
    #    if not data['MCclass'][k] in listname:
    #        listname.append(data['MCclass'][k])
    listname = ['Galaxy', 'Galaxy  (Uncl!)', 'QSO     (Gal?)', 'Galaxy  (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
    gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
    for k, name in enumerate(listname):
        indx = np.where(data['MCclass'] == name)[0]
        gdat.refrotyp[1][0][indx] = k / 10.
    # error budget
    # duplicate each feature along a new leading axis of size 3 (value, lower, upper)
    for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
        refrtile = [[] for q in gdat.indxrefr]
        refrfeat = getattr(gdat.refr, name)
        for q in gdat.indxrefr:
            if len(refrfeat[q]) > 0:
                refrtile[q] = np.tile(refrfeat[q], (3, 1))
        setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
    """Initialize reference-catalog metadata for the Fermi-LAT analysis
    (Acero+2015 3FGL sources and Manchester+2005 pulsars).

    NOTE(review): ``gmod`` is referenced below but never defined in this
    scope — NameError at runtime; confirm the intended object.
    """
    gdat.listnamerefr += ['ac15', 'ma05']
    gdat.indxrefr = np.arange(gdat.numbrefr)
    gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
    # signature parameter of each reference catalog
    gdat.refr.namepara.elemsign = ['flux', 'flux0400']
    setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
    setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
    # plotting ranges for the catalog-specific curvature and cutoff energy
    for name in gdat.listnamerefr:
        setattr(gdat.minmpara, 'curv' + name, -1.)
        setattr(gdat.maxmpara, 'curv' + name, 1.)
        setattr(gdat.minmpara, 'expc' + name, 0.1)
        setattr(gdat.maxmpara, 'expc' + name, 10.)
    # parameters carried by each reference catalog
    gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
    gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
    """Finalize the Fermi-LAT reference catalogs: read the 3FGL (Acero+2015)
    and ATNF (Manchester+2005) FITS files and populate positions, spectra,
    spectral shapes and pulsar timing features.

    NOTE(review): ``gmod`` is referenced below (scalstypplot, scaltvarplot)
    but never defined in this scope — NameError at runtime; confirm.
    """
    # plotting metadata for spectral type and variability index
    gdat.minmstyp = -0.5
    gdat.maxmstyp = 3.5
    gdat.lablstyp = 'S'
    gmod.scalstypplot = 'self'
    gdat.minmtvar = 0.
    gdat.maxmtvar = 400.
    gdat.labltvar = 'T'
    gmod.scaltvarplot = 'logt'
    # Acero+2015
    path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
    fgl3 = astropy.io.fits.getdata(path)
    gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
    # wrap longitudes; note this maps onto (−pi, pi] with a sign flip
    gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
    gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
    gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
    # spectra: leading axis of size 3 holds (central, lower, upper) values
    gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
    # differential fluxes from the integral band fluxes and bin widths
    gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
    fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
                                 fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
    # lower/upper spectra from the catalog uncertainty columns
    gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
    gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
    # zero out non-finite entries (missing uncertainties)
    gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
    gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
    # color indices relative to the pivot energy
    gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
    gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
    # positional uncertainty projected onto the longitude/latitude axes
    fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
    fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
    fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
    fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
    # human-readable tags: name, class and association
    gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
    for k in range(gdat.refr.dictelem[0]['lgal'].size):
        gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
    gdat.refrtvar[0] = fgl3['Variability_Index']
    # encode the 3FGL spectrum type as an integer code; any unmatched row aborts
    gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
    gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw        ')] = 0
    gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola     ')] = 1
    gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff     ')] = 2
    gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
    indx = np.where(gdat.refrstyp[0] == -1)[0]
    if indx.size > 0:
        raise Exception('')
    gdat.refrsind[0] = fgl3['Spectral_Index']
    gdat.refrcurv[0] = fgl3['beta']
    gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
    # replace missing curvature/cutoff values with sentinels
    gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
    gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
    # tile to the (value, lower, upper) error-budget convention
    gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
    gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
    gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
    # Manchester+2005
    path = gdat.pathdata + 'inpt/Manchester2005.fits'
    data = astropy.io.fits.getdata(path)
    gdat.refrlgal[1] = np.deg2rad(data['glon'])
    gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
    gdat.refrbgal[1] = np.deg2rad(data['glat'])
    # pulsar period, period derivative and 400 MHz flux density
    gdat.refrper0[1] = data['P0']
    gdat.refrper1[1] = data['P1']
    gdat.refrflux0400[1] = data['S400']
    #gdat.refrdism[1] = data['DM']
    #gdat.refrdlos[1] = data['Dist']
    # error budget
    # duplicate each feature along a new leading axis of size 3 (value, lower, upper)
    for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
        refrtile = [[] for q in gdat.indxrefr]
        refrfeat = getattr(gdat.refr, name)
        for q in gdat.indxrefr:
            if len(refrfeat[q]) > 0:
                refrtile[q] = np.tile(refrfeat[q], (3, 1))
        setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
    """Single-Gaussian PSF profile of width ``sigc`` evaluated at the scaled
    angular deviation ``scaldevi`` (normalized over the plane)."""
    norm = 1. / 2. / np.pi / sigc**2
    return norm * np.exp(-0.5 * scaldevi**2 / sigc**2)
def retr_singking(scaldevi, sigc, gamc):
    """Single King-profile PSF of width ``sigc`` and slope ``gamc`` evaluated
    at the scaled angular deviation ``scaldevi``."""
    norm = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc)
    return norm * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
def retr_doubgaus(scaldevi, frac, sigc, sigt):
    """Double-Gaussian PSF: a core of width ``sigc`` carrying fraction
    ``frac`` of the flux plus a tail of width ``sigt`` carrying the rest.

    Fix: the tail term previously reused ``sigc`` in both the normalization
    and the exponent, leaving ``sigt`` unused — the "double" Gaussian was
    degenerate with a single Gaussian. The tail now uses ``sigt`` (cf. the
    analogous tail handling in retr_gausking/retr_doubking).
    """
    core = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
    tail = (1. - frac) / 2. / np.pi / sigt**2 * np.exp(-0.5 * scaldevi**2 / sigt**2)
    return core + tail
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
    """Gaussian core (width ``sigc``, flux fraction ``frac``) plus King tail
    (width ``sigt``, slope ``gamt``) PSF profile."""
    core = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
    tail = (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
    return core + tail
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
    """Double-King PSF profile: a core King component (``sigc``, ``gamc``)
    carrying fraction ``frac`` of the flux plus a tail King component
    (``sigt``, ``gamt``) carrying the rest (Fermi-LAT parameterization)."""
    core = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
    tail = (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
    return core + tail
def retr_lgalbgal(gang, aang):
    """Convert a radial angle ``gang`` and azimuth ``aang`` to
    longitude/latitude offsets (flat-sky polar-to-Cartesian)."""
    return gang * np.cos(aang), gang * np.sin(aang)
def retr_gang(lgal, bgal):
    """Angular distance of (lgal, bgal) from the origin on the sphere."""
    return np.arccos(np.cos(lgal) * np.cos(bgal))
def retr_aang(lgal, bgal):
    """Azimuthal (position) angle of (lgal, bgal), measured from the lgal axis."""
    return np.arctan2(bgal, lgal)
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
    """Print a table of the generative parameter vector (unit and scaled
    values) of the given model and state, optionally restricted to the
    indices in ``indxsampshow``. A blank line separates each population's
    element-parameter section.
    """
    gmod = getattr(gdat, strgmodl)
    gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
    gmodstat = getattr(gdatobjt, strgstat)
    print('strgmodl: ' + strgmodl)
    print('strgstat: ' + strgstat)
    print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
    for k in gmod.indxparagenrfull:
        if indxsampshow is not None and k not in indxsampshow:
            continue
        # blank line before the first parameter of each population
        if gmod.numbparaelem > 0 and any(k == gmod.indxparagenrelemsing[l][0] for l in gmod.indxpopl):
            print('')
        print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
if False:
# amplitude-dependent proposal scale
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdv**2))
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], l)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
# place the new parameters into the sample vector
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
if np.fabs(gdatmodi.compfrst[0]) > maxmlgal or np.fabs(gdatmodi.compseco[0]) > maxmlgal or \
np.fabs(gdatmodi.compfrst[1]) > maxmbgal or np.fabs(gdatmodi.compseco[1]) > maxmbgal:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
# temp -- this would not work for multiple populations !
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
raise Exception('')
if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
raise Exception('')
def calc_probprop(gdat, gdatmodi):
    """Compute the proposal-dependent factors of the acceptance probability.

    Fills in, on ``gdatmodi.this``:
      * ``lpau``  -- log-probability of the auxiliary parameters,
      * ``ltrp``  -- log ratio of reverse/forward proposal probabilities,
      * ``ljcb``  -- log of the split/merge Jacobian,
    and mirrors the auxiliary parameters onto ``auxiparapop<l>`` attributes.

    Proposal types: 0 within-model, 1 birth, 2 death, 3 split, 4 merge.
    """
    gmod = gdat.fitt

    # probability of the auxiliary parameters
    if gdatmodi.this.indxproptype == 0:
        gdatmodi.this.lpau = 0.
    elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
        # birth/death: auxiliary parameters are drawn from the prior
        gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
        lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
        if gdatmodi.this.indxproptype == 1:
            gdatmodi.this.lpau += lpautemp
        if gdatmodi.this.indxproptype == 2:
            gdatmodi.this.lpau -= lpautemp
    elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
        gdatmodi.this.lpau = 0.
        dictelemtemp = [dict()]
        for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
            # BUGFIX: was gmod.gmod.boolcompposi (doubled attribute access,
            # AttributeError); siblings use gmod.boolcompposi.
            if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
                # positional auxiliary parameters follow a Gaussian kernel
                gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
            elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
                # non-positional, non-amplitude parameters follow the element prior
                dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
                gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
                                            gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
                                            gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
        if gdatmodi.this.indxproptype == 4:
            # merge is the reverse of split
            gdatmodi.this.lpau *= -1.

    if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
        ## ratio of the probability of the reverse and forward proposals
        if gdatmodi.this.indxproptype == 3:
            gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
                                                                                                                typeelem=gmod.typeelem)
            gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
        else:
            gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
                                                                                                                typeelem=gmod.typeelem)
            gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)

        ## Jacobian of the split/merge transformation (amplitude index differs
        ## between line-like and spatial elements)
        if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
            gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
        else:
            gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
        if gdatmodi.this.indxproptype == 4:
            gdatmodi.this.ljcb *= -1.
    else:
        gdatmodi.this.ljcb = 0.
        gdatmodi.this.ltrp = 0.

    for l in gmod.indxpopl:
        if gdatmodi.this.indxproptype > 0:
            setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
    """Map occupied-element lists to indices in the full parameter vector.

    Parameters
    ----------
    gdat : global data object
    indxelemfull : per-population lists of occupied element indices
    strgmodl : 'fitt' or 'true' -- selects the model on ``gdat``

    Returns
    -------
    list of dicts (one per population) mapping each element parameter name to
    an index array, plus a 'full' key with all indices; ``None`` when the
    model has no element parameters.
    """
    gmod = getattr(gdat, strgmodl)

    ## element parameters
    if gmod.numbparaelem > 0:
        indxparagenrfullelem = [[] for l in gmod.indxpopl]
        for l in gmod.indxpopl:
            # base offset of each occupied element's parameter block
            indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
            cntr = tdpy.cntr()
            indxparagenrfullelem[l] = dict()
            for nameparagenrelem in gmod.namepara.genrelem[l]:
                indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
            indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
        if gdat.booldiagmode:
            for l in gmod.indxpopl:
                if len(indxparagenrfullelem[l]['full']) > 0:
                    if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
                        print('strgmodl')
                        print(strgmodl)
                        # BUGFIX: removed a print of strgstat, which is not
                        # defined in this scope and raised NameError before the
                        # intended diagnostic exception could be reached.
                        print('gmod.numbparagenrbase')
                        print(gmod.numbparagenrbase)
                        print('gmod.numbparagenrelem[l]')
                        print(gmod.numbparagenrelem[l])
                        print('indxparagenrfullelem[l][full]')
                        summgene(indxparagenrfullelem[l]['full'])
                        print('gdat.fitt.minmpara.numbelempop0')
                        print(gdat.fitt.minmpara.numbelempop0)
                        print('gdat.fitt.maxmpara.numbelempop0')
                        print(gdat.fitt.maxmpara.numbelempop0)
                        raise Exception('Element parameter indices are bad.')
    else:
        indxparagenrfullelem = None

    return indxparagenrfullelem
def retr_weigmergodim(gdat, elin, elinothr):
    """Gaussian merge weight in one dimension (line energy)."""
    scaldist = (elin - elinothr) / gdat.radispmr
    return np.exp(-0.5 * scaldist**2)
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
    """Gaussian merge weight over two-dimensional sky position."""
    distsqrdscal = ((lgal - lgalothr)**2 + (bgal - bgalothr)**2) / gdat.radispmr**2
    return np.exp(-0.5 * distsqrdscal)
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
    """Probability weights for proposing a merge.

    ``strgtype == 'seco'`` returns, for the first element, the normalized
    weights of merging with each other element; ``strgtype == 'pair'`` returns
    the (symmetrized) probability of proposing this particular pair.
    ``typeelem`` is the per-population element-type list (required; all call
    sites pass ``typeelem=gmod.typeelem``).
    """
    # calculate the weights
    if strgtype == 'seco':
        numb = 1
    if strgtype == 'pair':
        numb = 2
    listweigmerg = []
    for a in range(numb):
        # BUGFIX: the original dereferenced gmod.typeelem with gmod undefined
        # in this scope (NameError); use the typeelem keyword instead.
        if typeelem[indxpopltran].startswith('lghtline'):
            elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
            elin = elintotl[gdatmodi.indxelemfullmodi[0]]
            elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))
            weigmerg = retr_weigmergodim(gdat, elin, elinothr)
        else:
            lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
            bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
            lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]
            bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]
            lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
            bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
            weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
        # NOTE(review): both loop iterations key off indxelemfullmodi[0], so
        # for 'pair' the two weight lists are identical -- confirm intent.
        listweigmerg.append(weigmerg)

    # determine the probability of merging the second element given the first element
    if strgtype == 'seco':
        probmerg = listweigmerg[0] / np.sum(listweigmerg[0])

    # determine the probability of merging the pair
    if strgtype == 'pair':
        if typeelem[indxpopltran].startswith('lghtline'):
            # BUGFIX: the original called the two-dimensional weight function
            # with one-dimensional arguments (TypeError: missing arguments);
            # line-like elements use the one-dimensional weight.
            weigpair = retr_weigmergodim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])
        else:
            weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])
        probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])

    if gdat.booldiagmode:
        if not np.isfinite(probmerg).all():
            raise Exception('Merge probability is infinite.')

    return probmerg
def retr_indxparaelem(gmod, l, u):
    """Indices in the full sample vector of the parameters of element u in population l."""
    offs = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l]
    return offs + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
def gang_detr():
    # NOTE(review): appears to be an unfinished symbolic-derivation scratch
    # function. It references sympy and a1..a3/b1..b3, none of which are
    # defined in this scope, so calling it raises NameError. Left untouched;
    # consider deleting or completing it.
    gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
    AB = sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
    """Evaluate the point spread function over angles for the given model.

    Parameters
    ----------
    psfp : flat array of PSF hyperparameters
    indxenertemp : energy-bin indices to evaluate
    thisangl : angles at which to evaluate the PSF
    typemodlpsfn : 'singgaus', 'singking', or 'doubking'
    strgmodl : model name attribute on gdat ('fitt' or 'true')

    NOTE(review): if typemodlpsfn is none of the three handled strings, psfn
    is never bound and the final return raises NameError.
    """
    gmod = getattr(gdat, strgmodl)

    # starting index of each (energy, event-type) PSF parameter block
    indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
    if gdat.typeexpr == 'ferm':
        # Fermi-LAT: convert to scaled angular deviation per energy/event type
        scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
        # same scaling on the angle grid, used below for normalization
        scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
    else:
        scalangl = thisangl[None, :, None]

    if typemodlpsfn == 'singgaus':
        # single Gaussian: one width parameter per (energy, event type)
        sigc = psfp[indxpsfpinit]
        sigc = sigc[:, None, :]
        psfn = retr_singgaus(scalangl, sigc)
    elif typemodlpsfn == 'singking':
        # single King profile: width and slope
        sigc = psfp[indxpsfpinit]
        gamc = psfp[indxpsfpinit+1]
        sigc = sigc[:, None, :]
        gamc = gamc[:, None, :]
        psfn = retr_singking(scalangl, sigc, gamc)
    elif typemodlpsfn == 'doubking':
        # double King profile: core + tail widths/slopes and mixing fraction
        sigc = psfp[indxpsfpinit]
        gamc = psfp[indxpsfpinit+1]
        sigt = psfp[indxpsfpinit+2]
        gamt = psfp[indxpsfpinit+3]
        frac = psfp[indxpsfpinit+4]
        sigc = sigc[:, None, :]
        gamc = gamc[:, None, :]
        sigt = sigt[:, None, :]
        gamt = gamt[:, None, :]
        psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
        if gdat.typeexpr == 'ferm':
            # evaluate on the full angle grid for the normalization integral
            psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)

    # normalize the PSF
    if gdat.typeexpr == 'ferm':
        # solid-angle integral of the PSF over the angle grid
        fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
        psfn /= fact

    return psfn
def retr_unit(lgal, bgal):
    """Cartesian unit vector for longitude lgal and latitude bgal (radians)."""
    cosinbgal = np.cos(bgal)
    xdat = cosinbgal * np.cos(lgal)
    ydat = -cosinbgal * np.sin(lgal)
    zaxi = np.sin(bgal)
    return xdat, ydat, zaxi
def retr_psec(gdat, conv):
    """Flattened 2D power spectrum (low-frequency quadrant) of a convergence map."""
    # temp
    convmaps = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
    powr = abs(scipy.fftpack.fft2(convmaps))**2
    psec = 1e-3 * powr[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf]
    return psec.flatten()
def retr_psecodim(gdat, psec):
    """Azimuthally-binned one-dimensional power spectrum from a flattened 2D one."""
    psectdim = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
    psecodim = np.zeros(gdat.numbsidecarthalf)
    for k in gdat.indxmpolodim:
        # average the power over pixels whose multipole falls inside bin k
        boolbins = (gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1])
        psecodim[k] = np.mean(psectdim[np.where(boolbins)])
    # weight by multipole squared
    psecodim *= gdat.meanpara.mpolodim**2
    return psecodim
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
    """Gaussian CDF at the lower bound and the CDF mass between the bounds."""
    sqrttwo = np.sqrt(2.)
    cdfnminm = 0.5 * (1. + sp.special.erf((minmvarb - meanvarb) / stdvvarb / sqrttwo))
    cdfnmaxm = 0.5 * (1. + sp.special.erf((maxmvarb - meanvarb) / stdvvarb / sqrttwo))
    return cdfnminm, cdfnmaxm - cdfnminm
def retr_condcatl(gdat):
    """Build the condensed catalog by associating elements across posterior samples.

    Stacks all sampled elements, builds per-parameter sparse distance matrices,
    greedily groups nearby stacked samples into condensed elements, and stores
    median/16th/84th-percentile summaries plus prevalences on ``gdat.dictglob``.

    BUGFIXES relative to the original: bound ``gmod``; ``indxstkssamp`` was an
    undefined name (the list is ``indxstksparagenrscalfull``); ``gmodstat.``
    prefix on a local; ``range(...)`` has no ``.remove``; ``.tonp.array()``,
    ``percennp.tile`` and bare ``median`` were corrupted spellings of
    ``.toarray()``, ``np.percentile`` and ``np.median``.
    """
    # BUGFIX: gmod was never bound in this function (NameError)
    gmod = gdat.fitt

    # setup
    ## number of stacked samples
    numbstks = 0
    indxtupl = []
    indxstks = []
    indxstksparagenrscalfull = []
    for n in gdat.indxsamptotl:
        indxstks.append([])
        indxstkssamptemp = []
        for l in gmod.indxpopl:
            indxstks[n].append([])
            for k in range(len(gdat.listpostindxelemfull[n][l])):
                indxstks[n][l].append(numbstks)
                indxstkssamptemp.append(numbstks)
                indxtupl.append([n, l, k])
                numbstks += 1
        # BUGFIX: was indxstkssamp.append(...) on an undefined name; the
        # per-sample stack-index list is read as indxstksparagenrscalfull below
        indxstksparagenrscalfull.append(np.array(indxstkssamptemp))

    if gdat.typeverb > 1:
        print('indxstks')
        print(indxstks)
        print('indxtupl')
        print(indxtupl)
        print('indxstkssamp')
        print(indxstksparagenrscalfull)
        print('numbstks')
        print(numbstks)

    cntr = 0
    arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
    for n in gdat.indxsamptotl:
        indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
        for l in gmod.indxpopl:
            for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
                for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
                    # BUGFIX: was gmodstat.indxparagenrfullelem with gmodstat
                    # undefined; the index map is the local computed above
                    arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, indxparagenrfullelem[l][nameparagenrelem][k]]

    if gdat.typeverb > 0:
        print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
        timeinit = gdat.functime()

    # per-parameter association distance thresholds from the proposal scales
    gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
    for k, nameparagenrelem in enumerate(gmod.namepara.elem):
        # temp
        l = 0
        gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]

    # construct lists of samples for each proposal type
    listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
    indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
    indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
    thisperc = 0
    cntr = 0
    for k in gmod.indxparagenrelemtotl:
        for n in range(numbstks):
            dist = np.fabs(arrystks[n, k] - arrystks[:, k])
            indxstks = np.where(dist < gdat.distthrs[k])[0]
            if indxstks.size > 0:
                for j in indxstks:
                    cntr += 1
                    listdisttemp[k].append(dist[j])
                    indxstksrows[k].append(n)
                    indxstkscols[k].append(j)
            nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
            if nextperc > thisperc:
                thisperc = nextperc
            # cap the total number of stored pairs
            if cntr > 1e6:
                break
        listdisttemp[k] = np.array(listdisttemp[k])
        indxstksrows[k] = np.array(indxstksrows[k])
        indxstkscols[k] = np.array(indxstkscols[k])
        if cntr > 1e6:
            break

    # sparse distance matrix per element parameter
    listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
    for k, nameparagenrelem in enumerate(gmod.namepara.elem):
        listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))

    listindxstkspair = []
    indxstksleft = []

    if gdat.typeverb > 0:
        timefinl = gdat.functime()

    # BUGFIX: range objects do not support .remove(); a mutable list is needed
    indxstksleft = list(range(numbstks))

    # list of sample lists of the labeled element
    indxstksassc = []
    cntr = 0

    gdat.prvlthrs = 0.05

    while len(indxstksleft) > 0:
        # count number of associations
        numbdist = np.zeros(numbstks, dtype=int) - 1
        for p in range(len(indxstksleft)):
            # BUGFIX: .tonp.array() is a corrupted spelling of scipy.sparse's .toarray()
            indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
                                (listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
            numbdist[indxstksleft[p]] = indxindx.size

        prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
        if prvlmaxmesti < gdat.prvlthrs:
            break

        # determine the element with the highest number of neighbors
        indxstkscntr = np.argmax(numbdist)
        indxsamptotlcntr = indxtupl[indxstkscntr][0]
        indxpoplcntr = indxtupl[indxstkscntr][1]
        indxelemcntr = indxtupl[indxstkscntr][2]

        # add the central element sample
        indxstksassc.append([])
        indxstksassc[cntr].append(indxstkscntr)
        indxstksleft.remove(indxstkscntr)

        if gdat.typeverb > 1:
            print('Match step %d' % cntr)
            print('numbdist')
            print(numbdist)
            print('indxstkscntr')
            print(indxstkscntr)
            print('indxstksleft')
            print(indxstksleft)

        # add the associated element samples
        if len(indxstksleft) > 0:
            for n in gdat.indxsamptotl:
                indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
                if n == indxsamptotlcntr:
                    continue
                if indxstkstemp.size > 0:
                    # pick the candidate closest to the central sample in
                    # summed squared parameter distance
                    totl = np.zeros_like(indxstkstemp)
                    for k in gmod.indxparagenrelemtotl:
                        temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
                        totl = totl + temp**2
                    indxleft = np.argsort(totl)[0]
                    indxstksthis = indxstkstemp[indxleft]
                    # accept only if within threshold in every parameter
                    thisbool = True
                    for k in gmod.indxparagenrelemtotl:
                        if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
                            thisbool = False
                    if thisbool:
                        indxstksassc[cntr].append(indxstksthis)
                        indxstksleft.remove(indxstksthis)
        cntr += 1

    gdat.dictglob['poststkscond'] = []
    gdat.dictglob['liststkscond'] = []
    # for each condensed element
    for r in range(len(indxstksassc)):
        gdat.dictglob['liststkscond'].append([])
        gdat.dictglob['liststkscond'][r] = {}
        gdat.dictglob['poststkscond'].append([])
        gdat.dictglob['poststkscond'][r] = {}
        for strgfeat in gmod.namepara.genrelem:
            gdat.dictglob['liststkscond'][r][strgfeat] = []

        # for each associated sample associated with the central stacked sample
        for k in range(len(indxstksassc[r])):
            indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
            indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
            indxelemcntr = indxtupl[indxstksassc[r][k]][2]
            for strgfeat in gmod.namepara.genrelem:
                temp = getattr(gdat, 'list' + strgfeat)
                if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
                    temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
                    gdat.dictglob['liststkscond'][r][strgfeat].append(temp)

    for r in range(len(gdat.dictglob['liststkscond'])):
        for strgfeat in gmod.namepara.genrelem:
            arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
            # BUGFIX: bare median and percennp.tile were corrupted spellings of
            # np.median and np.percentile
            gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)

    gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
    gdat.indxstkscond = np.arange(gdat.numbstkscond)
    # prevalence: fraction of samples in which each condensed element appears
    gdat.prvl = np.empty(gdat.numbstkscond)
    for r in gdat.indxstkscond:
        gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
    gdat.prvl /= gdat.numbsamptotl
    gdat.minmprvl = 0.
    gdat.maxmprvl = 1.
    retr_axis(gdat, 'prvl')
    gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
    if gdat.makeplot:
        # NOTE(review): strgpdfn is not defined in this scope -- this branch
        # raises NameError when gdat.makeplot is True; confirm the intended
        # source of strgpdfn (likely a missing function argument).
        pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')
        for k, nameparagenrelem in enumerate(gmod.namepara.elem):
            path = pathcond + 'histdist' + nameparagenrelem
            listtemp = np.copy(listdist[k].toarray()).flatten()
            listtemp = listtemp[np.where(listtemp != 1e20)[0]]
            tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
        path = pathcond + 'histprvl'
        tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
    gdat.prvlthrs = 0.1
    gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
    gdat.numbprvlhigh = gdat.indxprvlhigh.size
def retr_conv(gdat, defl):
    """Convergence map (half the absolute divergence) of a flattened deflection field."""
    deflmaps = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
    # temp
    gradxaxi = np.gradient(deflmaps[:, :, 0], gdat.sizepixl, axis=0)
    gradyaxi = np.gradient(deflmaps[:, :, 1], gdat.sizepixl, axis=1)
    conv = 0.5 * abs(gradxaxi + gradyaxi)
    return conv.flatten()
def retr_invm(gdat, defl):
    """Inverse magnification map from a flattened deflection field."""
    # temp
    deflmaps = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
    gradxx = np.gradient(deflmaps[:, :, 0], gdat.sizepixl, axis=0)
    gradxy = np.gradient(deflmaps[:, :, 0], gdat.sizepixl, axis=1)
    gradyx = np.gradient(deflmaps[:, :, 1], gdat.sizepixl, axis=0)
    gradyy = np.gradient(deflmaps[:, :, 1], gdat.sizepixl, axis=1)
    # determinant of (identity minus deflection Jacobian)
    invm = (1. - gradxx) * (1. - gradyy) - gradxy * gradyx
    return invm.flatten()
def setp_indxswepsave(gdat):
    """Mark which MCMC sweeps are saved and map saved sweeps to sample indices."""
    gdat.indxswep = np.arange(gdat.numbswep)
    # keep every factthin-th sweep after burn-in, numbsamp times
    gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
    gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
    gdat.boolsave[gdat.indxswepsave] = True
    # -1 marks sweeps that are not saved
    gdat.indxsampsave = np.full(gdat.numbswep, -1, dtype=int)
    gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
def retr_cntspnts(gdat, listposi, spec):
    """Photon counts, summed over energy, of elements with spectra ``spec``.

    ``listposi`` is [lgal, bgal] for spatially-binned data, otherwise [elin].
    ``spec`` has shape (numbener, numbelem).
    """
    numbelem = spec.shape[1]
    cnts = np.zeros((gdat.numbener, numbelem))
    if gdat.boolbinsspat:
        # map each element's sky position to a pixel
        indxpixlpnts = retr_indxpixl(gdat, listposi[1], listposi[0])
    else:
        indxpixlpnts = np.zeros_like(listposi[0], dtype=int)
    for k in range(numbelem):
        cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
    if gdat.enerdiff:
        cnts *= gdat.deltener[:, None]
    return np.sum(cnts, axis=0)
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
    """Critical surface mass density for the given angular-diameter distances."""
    return gdat.factnewtlght * adissour / (4. * np.pi * adishost * adishostsour)
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
    """Conversion factor from squared Einstein radius to mass."""
    mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
    return np.pi * adishost**2 * mdencrit
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
    """Conversion factor from deflection normalization to cutoff mass."""
    mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
    return np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(acut / asca)
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
    """Cutoff mass of a truncated subhalo from its deflection normalization."""
    mscl = defs * np.pi * adishost**2 * mdencrit * asca
    return mscl * retr_mcutfrommscl(acut / asca)
def retr_mcutfrommscl(fracacutasca):
    """Dimensionless cutoff-mass factor as a function of acut/asca."""
    fsqr = fracacutasca**2
    brac = (fsqr - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fsqr + 1.)
    return fsqr / (fsqr + 1.)**2 * brac
def retr_negalogt(varb):
    """Signed log10 transform: sign(varb) * log10(|varb|).

    BUGFIX: the original called bare ``sign()``, which is not defined anywhere
    in scope and raised NameError; use ``np.sign``.
    """
    return np.sign(varb) * np.log10(np.fabs(varb))
def retr_gradmaps(gdat, maps):
    """Per-pixel spatial gradient (d/dx, d/dy) of a flattened map."""
    # temp -- this does not work with vanishing exposure
    mapstemp = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
    gradxaxi = np.gradient(mapstemp, gdat.sizepixl, axis=0)
    gradyaxi = np.gradient(mapstemp, gdat.sizepixl, axis=1)
    return np.dstack((gradxaxi, gradyaxi)).reshape((gdat.numbpixlcart, 2))
def retr_spatmean(gdat, inpt, boolcntp=False):
    """Spatial mean (and a standard-deviation estimate) of a data cube per region.

    Parameters
    ----------
    inpt : data cube indexed by gdat.listindxcubespatmean[b] per region b
    boolcntp : if True, ``inpt`` is already in counts; otherwise it is
        converted via exposure, pixel solid angle and (if applicable) energy
        bin width.

    Returns
    -------
    (listspatmean, listspatstdv) : per-region lists of mean and stdv arrays.
    """
    listspatmean = [[] for b in gdat.indxspatmean]
    listspatstdv = [[] for b in gdat.indxspatmean]
    for b, namespatmean in enumerate(gdat.listnamespatmean):
        if boolcntp:
            cntp = inpt[gdat.listindxcubespatmean[b]]
        else:
            # convert flux to counts: exposure times pixel solid angle
            cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
            if gdat.enerdiff:
                # and energy bin width for differential-energy data
                cntp *= gdat.deltener[:, None, None]
        spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
        # Poisson-like error estimate from total counts
        spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
        if gdat.boolcorrexpo:
            # undo the exposure factor
            spatmean /= gdat.expototlmean
            spatstdv /= gdat.expototlmean
        if gdat.enerdiff:
            # undo the energy bin width
            spatmean /= gdat.deltener
            spatstdv /= gdat.deltener
        listspatmean[b] = spatmean
        listspatstdv[b] = spatstdv
    return listspatmean, listspatstdv
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
    '''
    Compute the relevance of a candidate element: the mean (absolute) per-pixel
    dot product between the gradient of the input map and the deflection field
    of an element at (lgal, bgal) with deflection scale defs.

    Parameters
    ----------
    maps : flattened map whose gradient is correlated with the deflection
    lgal, bgal, defs, asca, acut : element position, amplitude and radii
    indxpixlelem : pixel indices over which the deflection is evaluated
    absv : if True, take the absolute value of the per-pixel dot products
    cntpmodl : optional per-pixel model counts used to normalize the product
    '''
    grad = retr_gradmaps(gdat, maps)
    defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
    # per-pixel product of gradient and deflection components
    prod = grad * defl
    if cntpmodl is not None:
        # normalize by the model counts in each pixel
        prod /= cntpmodl[:, None]
    dotstemp = np.sum(prod, 1)
    if absv:
        # removed the original dead `else: dotstemp = dotstemp` no-op branch
        dotstemp = np.fabs(dotstemp)
    dots = np.mean(dotstemp)
    return dots
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
    '''
    Retrieve a copy of the named variable from the appropriate container:
    data count maps live on gdat itself; true-model variables live on the
    model-state object; fitting-model variables live on gdatmodi (current
    state) or on gdat (posterior summaries), depending on strgstat/strgmome.
    Optional indxlist/indxvarb select a subset before copying.
    '''
    if strgvarb.startswith(('cntpdata', 'histcntpdata')):
        # data count maps and their histograms are stored directly on gdat
        varb = getattr(gdat, strgvarb)
    else:
        if strgmodl == 'true':
            gmodstat = getattr(getattr(gdat, strgmodl), strgstat)
            varb = getattr(gmodstat, strgvarb)
        if strgmodl == 'fitt':
            if strgstat == 'this':
                # current-state variables (optionally their kernel errors)
                nameattr = strgstat + ('errr' if strgmome == 'errr' else '') + strgvarb
                varb = getattr(gdatmodi, nameattr)
            if strgstat == 'pdfn':
                # posterior summary, e.g. 'pmea' + strgpdfn + strgvarb
                varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
    if indxlist is not None:
        varb = varb[indxlist]
    if indxvarb is not None:
        # error arrays carry a leading (lower/upper) axis that is kept whole
        varb = varb[[slice(None)] + indxvarb] if strgmome == 'errr' else varb[indxvarb]
    return np.copy(varb)
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
if gmod.typeelem[l] == 'clus':
gmod.nameparagenrelemampl[l] = 'nobj'
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
# foreground grid (image plane) -- the one np.where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.4, 2.4]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
for i in gdat.indxenerinde:
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-0.5, -3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
elif isinstance(gmod.typeback[c], list) and isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c]]), sind=np.array([gmod.typeback[c]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
indxexpatemp = int(gmod.typeback[c][8:])
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
booltemp = False
raise Exception('Background templates must be positive-definite every where.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
raise Exception('At least one background template must be positive everynp.where.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is not convolved by a kernel and point sources exist
typeevalpsfn = 'full'
else:
# the background is not convolved by a kernel and point sources exist
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot='$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot='$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
# background
## number of background parameters
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
# variables for which whose marginal distribution and pair-correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
gmod.namepara.derielemodim[l] + ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
lablpopl[l]= 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaderielem)
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable propobability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
    # list of strings across all populations
    ## all (generative and derived) element parameters
    # NOTE(review): numbparaelem is rebound here to a scalar (len of the
    # per-population list, i.e. the number of populations) and rebound again
    # below to a per-population integer array; earlier in this routine it held
    # per-population element-parameter totals.  Three meanings coexist --
    # confirm which one downstream readers expect.
    gmod.numbparaelem = len(gmod.namepara.elem)
    gmod.indxparaelem = np.arange(gmod.numbparaelem)
    # flattened list of generative element parameters, tagged with 'pop%d'
    gmod.listnameparagenfelem = []
    for l in gmod.indxpopl:
        for nameparagenrelem in gmod.namepara.genrelem[l]:
            gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
    # concatenated list of flattened generative and derived element parameters
    # NOTE(review): namepara.derielem is a list of per-population lists while
    # listnameparagenfelem is a flat list of strings, so this concatenation
    # mixes strings and lists -- verify this is intended.
    gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
    gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
    for l in gmod.indxpopl:
        gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.numbparaelem > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
if gmod.boollens:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.numbparaelem > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
        ## parameters parametrizing priors on element parameters
        # Collect the names of the hyperparameters implied by each element
        # parameter's prior scaling type.
        liststrgvarb = []
        for l in gmod.indxpopl:
            if gmod.maxmpara.numbelem[l] > 0:
                for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
                    if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
                        liststrgvarb += [strgfeat + 'distscal']
                    if strgpdfnelemgenr == 'powr':
                        liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
                    if strgpdfnelemgenr == 'dpow':
                        liststrgvarb += [strgfeat + 'distbrek']
                        liststrgvarb += [strgfeat + 'sloplowr']
                        liststrgvarb += [strgfeat + 'slopuppr']
                    if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
                        liststrgvarb += [strgfeat + 'distmean']
                    if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
                        liststrgvarb += [strgfeat + 'diststdv']
                    if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
                        # NOTE(review): nameparagenrelem is not bound by this
                        # loop (the zip binds strgpdfnelemgenr and strgfeat);
                        # it leaks in from an earlier scope, so these two
                        # entries are built from a stale name -- presumably
                        # strgfeat was intended, as in every other branch.
                        liststrgvarb += [nameparagenrelem + 'distmean', nameparagenrelem + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
        # Assign a counter index to each prior hyperparameter of each
        # non-'self'-scaled generative element parameter.
        for l in gmod.indxpopl:
            strgpopl = 'pop%d' % l
            if gmod.maxmpara.numbelem[l] > 0:
                for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
                    # 'self'-scaled parameters carry no hyperparameter
                    if gmod.listscalparagenrelem[l][k] == 'self':
                        continue
                    indx = cntr.incr()
                    if gmod.listscalparagenrelem[l][k] == 'dpow':
                        for nametemp in ['brek', 'sloplowr', 'slopuppr']:
                            strg = '%s' % nametemp + nameparagenrelem
                            setattr(gmod.indxpara, strg, indx)
                            # NOTE(review): duplicate setattr -- the line below
                            # repeats the one above with identical arguments.
                            # Also, all three 'dpow' sub-names share the single
                            # index drawn above; confirm that only one counter
                            # increment for the whole triple is intended.
                            setattr(gmod.indxpara, strg, indx)
                    else:
                        if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
                            strghypr = 'scal'
                        if gmod.listscalparagenrelem[l][k] == 'powr':
                            strghypr = 'slop'
                        if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
                                        gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
                            strghypr = 'mean'
                        if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
                                        gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
                            strghypr = 'stdv'
                        strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
                        setattr(gmod.indxpara, strg, indx)
    # group PSF parameters
    # Assign counter indices to the per-energy, per-event-type PSF shape
    # parameters, then collect them into indxpara.psfp by name prefix.
    if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
        for m in gdat.indxevtt:
            for i in gdat.indxener:
                setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
                if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
                    setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
                if gmod.typemodlpsfn == 'doubking':
                    setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
                    setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
                    # NOTE(review): attribute is named 'ffenen...' but the
                    # collection loop below filters on the prefix 'psffe', so
                    # this index is never collected into indxpara.psfp --
                    # confirm whether 'psffen...' was intended.
                    setattr(gmod.indxpara, 'ffenen%02devt%d' % (i, m), cntr.incr())
    gmod.indxpara.psfp = []
    for strg, valu in gmod.indxpara.__dict__.items():
        if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
            gmod.indxpara.psfp.append(valu)
    gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
    # Route each scalar parameter index into its lens-model component group
    # and into the source/host spectral index lists.
    for strg, valu in gmod.__dict__.items():
        if isinstance(valu, list) or isinstance(valu, np.ndarray):
            continue
        if gdat.typeexpr == 'hubb':
            for namecomplens in gmod.listnamecomplens:
                for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
                    if strg[12:].startswith(strgtemp):
                        # NOTE(review): gmod is an attribute container, not a
                        # mapping -- the subscript gmod['indxparagenr' + ...]
                        # would raise TypeError if this branch is ever taken;
                        # the lists were created above via
                        # setattr(gmod.indxpara, namecomplens, []), so a
                        # getattr on that name looks like what was intended.
                        # Also, the isinstance(valu, list) branch below is
                        # unreachable: lists are skipped by the continue at the
                        # top of this loop.
                        if isinstance(valu, list):
                            for valutemp in valu:
                                gmod['indxparagenr' + namecomplens].append(valutemp)
                        else:
                            gmod['indxparagenr' + namecomplens].append(valu)
        # remove indxpara. from strg
        strg = strg[12:]
        if strg.startswith('fluxsour') or strg.startswith('sindsour'):
            gmod.indxpara.specsour.append(valu)
        if strg.startswith('fluxhost') or strg.startswith('sindhost'):
            gmod.indxpara.spechost.append(valu)
    if gmod.boollens or gmod.boolhost:
        gmod.indxpara.host = gmod.indxparahostlght + gmod.indxparahostlens
        gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base paremeter names and scales...' % strgmodl)
    gmod = getattr(gdat, strgmodl)
    # pairs of (plain-text, math) labels appended per background component
    listlablback = []
    # NOTE(review): duplicate initialization -- the line above already binds
    # listlablback to an empty list.
    listlablback = []
    for nameback in gmod.listnameback:
        if nameback == 'isot':
            listlablback.append('Isotropic')
            listlablback.append(r'$\mathcal{I}$')
        if nameback == 'fdfm':
            listlablback.append('FDM')
            listlablback.append(r'$\mathcal{D}$')
        if nameback == 'dark':
            listlablback.append('NFW')
            listlablback.append(r'$\mathcal{D}_{dark}$')
        if nameback == 'part':
            listlablback.append('Particle Back.')
            listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\rho,\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubt', 'f_{D/ST}')
    ### labels for background units
    # Build label-unit strings for flux and surface brightness over the four
    # energy scalings (f, Ef, E^2 f, f) and four energy units.
    if gdat.typeexpr == 'ferm':
        for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
            for labltemptemp in ['flux', 'sbrt']:
                # define the label
                # NOTE(review): the branches below reference labltemp, but the
                # loop variable is labltemptemp; unless labltemp happens to be
                # defined at module scope this raises NameError whenever
                # typeexpr == 'ferm' -- presumably labltemptemp was intended.
                if nameenerscaltype == 'en00':
                    strgenerscal = '%s' % labltemp
                if nameenerscaltype == 'en01':
                    strgenerscal = 'E%s' % labltemp
                if nameenerscaltype == 'en02':
                    strgenerscal = 'E^2%s' % labltemp
                if nameenerscaltype == 'en03':
                    strgenerscal = '%s' % labltemp
                labl = '%s' % strgenerscal
                for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
                    strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
                    if nameenerscaltype == 'en00':
                        strgenerscalunit = '%s$^{-1}$' % strgenerunit
                    if nameenerscaltype == 'en01':
                        strgenerscalunit = ''
                    if nameenerscaltype == 'en02':
                        strgenerscalunit = '%s' % strgenerunit
                    if nameenerscaltype == 'en03':
                        strgenerscalunit = '%s' % strgenerunit
                    # define the label unit
                    for namesoldunit in ['ster', 'degr']:
                        if labltemptemp == 'flux':
                            lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
                            # NOTE(review): stored under 'lablflux...unit' but
                            # read back later as 'fluxen00...unit' -- the
                            # 'labl' prefix makes the later getattr miss this
                            # attribute; confirm intended key.
                            setattr(gmod.lablunitpara, 'lablflux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
                        else:
                            if namesoldunit == 'ster':
                                lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
                            if namesoldunit == 'degr':
                                lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
                            setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
    if gdat.typedata == 'inpt':
        # plotting bounds for pulsar period and period derivative
        gdat.minmpara.per0 = 1e-3
        gdat.maxmpara.per0 = 1e1
        gdat.minmpara.per1 = 1e-20
        gdat.maxmpara.per1 = 1e-10
        # NOTE(review): the two lines below repeat the per1 bounds verbatim.
        gdat.minmpara.per1 = 1e-20
        gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\Delta_{%d}' % q)
gdat.lablsigm = '\sigma_l'
gdat.lablgamm = '\gamma_l'
gdat.lablbcom = '\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = '\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = '\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened generative element parameter names, counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
for e in gmod.indxsersfgrd:
gmod.namepara.scal += ['masshost' + strgsersfgrd + 'bein']
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masshost' + strgsersfgrd + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (q, l)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
for l in gmod.indxpopl:
for k in gmod.indxparakind:
name = gmod.namepara.kind[k]
scal = getattr(gmod.scalpara, name)
if scal == 'self' or scal == 'logt':
minm = getattr(gmod.minmpara, name)
maxm = getattr(gmod.maxmpara, name)
if scal == 'self':
fact = maxm - minm
if scal == 'logt':
fact = np.log(maxm / minm)
if fact == 0:
print('name')
print(name)
raise Exception('')
setattr(gmod.factpara, name, fact)
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg == 'dist' and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
    '''
    Make diagnostic plots for the lensing model: cutoff deflection profiles,
    pixel-convolved Sersic profiles, minimum/maximum detectable subhalo mass as a
    function of host and source redshift, and the Einstein wavelength vs. redshift.
    '''
    # NOTE(review): gmod was previously referenced without being defined (NameError);
    # the fitting model is the natural choice here -- TODO confirm against callers
    gmod = gdat.fitt
    # BUGFIX: lablxdat was only defined inside the boolelemdeflsubh branch but is used
    # unconditionally below; hoist it
    lablxdat = gdat.labltotlpara.gang
    if gmod.boolelemdeflsubh:
        xdat = gdat.binspara.angl[1:] * gdat.anglfact
        # representative (deflection scale, angle scale, cutoff angle, asymmetry) triples
        listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
        listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
        listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
        listasym = [False, False, False]
        listydat = []
        for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
            listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
        for scalxdat in ['self', 'logt']:
            path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
            tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
                                            lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
    # pixel-convolution of the Sersic profile
    # temp -- y axis labels are wrong, should be per solid angle
    xdat = gdat.binspara.lgalsers * gdat.anglfact
    for n in range(gdat.numbindxsers + 1):
        for k in range(gdat.numbhalfsers + 1):
            # only plot the k == 5 slice
            if k != 5:
                continue
            path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
            tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
            # fractional difference between the convolved and central profiles
            path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
            tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
                                                         scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
            path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
            tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
                                                         scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
    # Sersic surface brightness profiles for representative parameter values
    xdat = gdat.binspara.angl * gdat.anglfact
    listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
    listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
    listindx = np.array([4., 2., 4., 10.])
    listydat = []
    listlabl = []
    for spec, size, indx in zip(listspec, listsize, listindx):
        listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
        listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
    path = gdat.pathinitintr + 'sersprof.pdf'
    # BUGFIX: the legend list was passed as the undefined name listlegd; the labels
    # built above live in listlabl
    tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
                                                                          listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
    # detectable subhalo mass limits over the host/source redshift plane
    minmredshost = 0.01
    maxmredshost = 0.4
    minmredssour = 0.01
    maxmredssour = 2.
    numbreds = 200
    retr_axis(gdat, 'redshost')
    retr_axis(gdat, 'redssour')
    # angular diameter distance to the host at each redshift bin center
    gdat.meanpara.adishost = np.empty(numbreds)
    for k in range(numbreds):
        gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
    asca = 0.1 / gdat.anglfact
    acut = 1. / gdat.anglfact
    minmmass = np.zeros((numbreds + 1, numbreds + 1))
    maxmmass = np.zeros((numbreds + 1, numbreds + 1))
    for k, redshost in enumerate(gdat.binspara.redshost):
        for n, redssour in enumerate(gdat.binspara.redssour):
            # only physical configurations where the source is behind the host
            if redssour > redshost:
                adishost = gdat.adisobjt(redshost)
                adissour = gdat.adisobjt(redssour)
                adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
                factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
                minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
                maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
    # contour plot of the minimum detectable subhalo mass
    valulevl = [7.0, 7.3, 7.7, 8., 8.6]
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, 10, colors='g', levels=valulevl)
    axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
    axis.set_xlabel(r'$z_{\rm{hst}}$')
    axis.set_ylabel(r'$z_{\rm{src}}$')
    axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
    path = gdat.pathinitintr + 'massredsminm.pdf'
    plt.tight_layout()
    figr.savefig(path)
    plt.close(figr)
    # image + contour plot of the maximum detectable subhalo mass
    valulevl = np.linspace(9., 11., 20)
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
    cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, 10, colors='g', levels=valulevl)
    axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
    axis.set_xlabel('$z_{hst}$')
    axis.set_ylabel('$z_{src}$')
    axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
    path = gdat.pathinitintr + 'massredsmaxm.pdf'
    plt.colorbar(imag)
    plt.tight_layout()
    figr.savefig(path)
    plt.close(figr)
    # physical wavelength subtended by one pixel / by the full image vs. host redshift
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
    axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
    axis.set_xlabel('$z_h$')
    axis.set_yscale('log')
    axis.set_ylabel(r'$\lambda$ [kpc]')
    path = gdat.pathinitintr + 'wlenreds.pdf'
    plt.tight_layout()
    figr.savefig(path)
    plt.close(figr)
    # cutoff mass over scale mass as a function of the cutoff-to-scale angle ratio
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    fracacutasca = np.logspace(-1., 2., 20)
    mcut = retr_mcutfrommscl(fracacutasca)
    # BUGFIX: axis.lognp.log is not a valid matplotlib call; a log-log line plot was intended
    axis.loglog(fracacutasca, mcut)
    axis.set_xlabel(r'$\tau_n$')
    axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
    axis.axhline(1., ls='--')
    path = gdat.pathinitintr + 'mcut.pdf'
    plt.tight_layout()
    figr.savefig(path)
    plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
    '''
    Return the sorted run tags of previous PCAT runs under pathpcat whose configuration
    string matches strgcnfg and whose posterior state file checks out.
    '''
    listrtagprev = []
    # run tags are the timestamp-prefixed entries in the image directory
    for rtag in fnmatch.filter(os.listdir(pathpcat + '/imag/'), '2*'):
        suff = rtag[16:]
        boolcnfg = strgcnfg + '_' + suff.split('_')[-1] == suff
        if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and boolcnfg:
            listrtagprev.append(rtag)
    return sorted(listrtagprev)
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
    '''
    Attach a legend with the file's standard styling (fancy box, white filled frame)
    to the given axis, built from the handles and labels already on the axis.
    '''
    hand, labl = axis.get_legend_handles_labels()
    legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
    frame = legd.get_frame()
    frame.set_fill(True)
    frame.set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
    '''
    Construct the list of name strings for a single variable, expanded over populations
    (popl), energy bins (ener), event types (evtt), background components (back),
    Sersic components (isfr), and elements (iele).

    Each axis argument is 'none' (do not expand), 'full' (expand over all indices),
    or a single index. Returns the list of expanded name strings.
    '''
    if popl == 'full':
        indxpopltemp = gmod.indxpopl
    elif popl != 'none':
        indxpopltemp = [popl]
    if ener == 'full':
        indxenertemp = gdat.indxener
    elif ener != 'none':
        indxenertemp = [ener]
    if evtt == 'full':
        indxevtttemp = gdat.indxevtt
    elif evtt != 'none':
        indxevtttemp = [evtt]
    if back == 'full':
        gmod.indxbacktemp = gmod.indxback
    elif isinstance(back, int):
        gmod.indxbacktemp = np.array([back])
    # BUGFIX: indxisfrtemp was never defined, so any isfr != 'none' raised a NameError;
    # mirror the pattern used for the other axes
    if isfr == 'full':
        indxisfrtemp = gmod.indxsersfgrd
    elif isfr != 'none':
        indxisfrtemp = [isfr]
    liststrgvarb = []
    if iele != 'none':
        for l in gmod.indxpopl:
            if iele == 'full':
                # BUGFIX: numbelem is indexed per population elsewhere in the file
                listiele = np.arange(gmod.maxmpara.numbelem[l])
            else:
                listiele = [iele]
            for k in listiele:
                liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))
    if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':
        for l in indxpopltemp:
            liststrgvarb.append(strgvarb + 'pop%d' % l)
    if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':
        for e in indxisfrtemp:
            liststrgvarb.append(strgvarb + 'isf%d' % e)
    if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':
        for i in indxenertemp:
            for m in indxevtttemp:
                liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))
    if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':
        for c in gmod.indxbacktemp:
            for i in indxenertemp:
                liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))
    if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':
        for c in gmod.indxbacktemp:
            liststrgvarb.append(strgvarb + 'back%04d' % c)
    if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':
        for i in indxenertemp:
            liststrgvarb.append(strgvarb + 'en%02d' % i)
    if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':
        liststrgvarb.append(strgvarb)
    if gdat.booldiagmode:
        # each expanded name must be unique; duplicates indicate conflicting axis arguments
        for strgvarbtemp in liststrgvarb:
            if liststrgvarb.count(strgvarbtemp) != 1:
                raise Exception('Duplicate variable name %s in %s (popl=%s, ener=%s, evtt=%s, back=%s, isfr=%s, iele=%s)' % \
                                                    (strgvarbtemp, liststrgvarb, popl, ener, evtt, back, isfr, iele))
    return liststrgvarb
def setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \
              popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \
              boolinvr=False, \
              strgmodl=None, strgstat=None, \
             ):
    '''
    Set up variable values across all models (true and fitting) as well as all populations, energy bins,
    event bins, background components, and Sersic components.

    The variable name strgvarbbase is expanded via setp_namevarbsing over the chosen
    axes (popl/ener/evtt/back/isfr/iele), and for each expanded name the provided
    features (valu, minm, maxm, scal, labels, mean, stdv, cmap, numbbins) are
    installed on the relevant model container(s) via setp_varbcore. When both limits
    (or a Gaussian mean/stdv pair) are available, the bin limits, edges, centers,
    and widths are also computed and stored.
    '''
    # determine the list of models
    if strgmodl is None:
        if gdat.typedata == 'mock':
            liststrgmodl = ['true', 'fitt', 'plot']
        else:
            liststrgmodl = ['fitt', 'plot']
    else:
        if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':
            liststrgmodl = [strgmodl]
        else:
            liststrgmodl = ['fitt', 'plot']
    print('liststrgmodl')
    print(liststrgmodl)
    for strgmodl in liststrgmodl:
        # 'plot' features live directly on gdat; model features live on the model object
        if strgmodl == 'plot':
            gmod = gdat.fitt
            gmodoutp = gdat
        else:
            gmod = getattr(gdat, strgmodl)
            gmodoutp = gmod
        # get the list of names of the variable
        liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')
        if iele != 'none':
            liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)
        else:
            liststrgvarb = liststrgvarbnone
        # set the values of each variable in the list
        for strgvarb in liststrgvarb:
            if minm is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)
            if maxm is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)
            if mean is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)
            if stdv is not None:
                # NOTE(review): stdv is stored into meanpara, same container as mean --
                # looks like a dropped stdvpara; confirm before relying on it
                setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, stdv)
            if valu is not None:
                if strgstat is None:
                    print('strgvarb')
                    print(strgvarb)
                    print('strgmodl')
                    print(strgmodl)
                    print('valu')
                    print(valu)
                    print('')
                    # no state given: install the value directly on the model/gdat object
                    setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)
                elif strgstat == 'this':
                    # state 'this': install the value on the current-state container
                    setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)
            if scal is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)
            if lablroot is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)
            if lablunit is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)
            if cmap is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)
            setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)
            # create limt, bins, mean, and delt
            # (note Python precedence: (minm and maxm) or (mean and stdv))
            if minm is not None and maxm is not None or mean is not None and stdv is not None:
                # determine minima and maxima for Gaussian or log-Gaussian distributed parameters
                if mean is not None:
                    minm = mean - gdat.numbstdvgaus * stdv
                    maxm = mean + gdat.numbstdvgaus * stdv
                # uniformly-distributed
                if scal == 'self' or scal == 'pois' or scal == 'gaus':
                    binsunif = np.linspace(minm, maxm, numbbins + 1)
                if scal == 'logt' or scal == 'powr':
                    # bin uniformly in log10 space
                    binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
                    if gdat.booldiagmode:
                        if minm <= 0.:
                            raise Exception('')
                if scal == 'asnh':
                    # bin uniformly in arcsinh space
                    binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)
                if boolinvr:
                    binsunif = binsunif[::-1]
                meanparaunif = (binsunif[1:] + binsunif[:-1]) / 2.
                # map the uniformly spaced bins back to the native space
                if scal == 'self' or scal == 'pois' or scal == 'gaus':
                    meanpara = meanparaunif
                    bins = binsunif
                    minmunif = minm
                    maxmunif = maxm
                if scal == 'logt' or scal == 'powr':
                    meanpara = 10**meanparaunif
                    bins = 10**binsunif
                    minmunif = np.log10(minm)
                    maxmunif = np.log10(maxm)
                if scal == 'asnh':
                    meanpara = np.sinh(meanparaunif)
                    bins = np.sinh(binsunif)
                    minmunif = np.arcsinh(minm)
                    maxmunif = np.arcsinh(maxm)
                delt = np.diff(bins)
                limt = np.array([minm, maxm])
                # 'self' is not yet defined
                if scal == 'asnh' or scal == 'logt' or scal == 'powr':
                    listvalutickmajr, listlabltickmajr, listvalutickminr, listlabltickminr = tdpy.retr_valulabltick(minm, maxm, scal)
                    setattr(gmodoutp.labltickmajrpara, strgvarb, listlabltickmajr)
                    setattr(gmodoutp.valutickmajrpara, strgvarb, listvalutickmajr)
                    setattr(gmodoutp.labltickminrpara, strgvarb, listlabltickminr)
                    setattr(gmodoutp.valutickminrpara, strgvarb, listvalutickminr)
                #labltick = np.empty(gdat.numbtickcbar, dtype=object)
                #for k in range(gdat.numbtickcbar):
                #    if scal == 'asnh':
                #        valutick[k] = np.sinh(tickunif[k])
                #    if scal == 'logt' or scal == 'powr':
                #        valutick[k] = 10**(tickunif[k])
                #    # avoid very small, but nonzero central values in the residual count color maps
                #    if strgcbar == 'cntpresi' and np.fabs(valutick[k]) < 1e-5:
                #        valutick[k] = 0.
                #    if strgcbar == 'cntpdata' and np.amax(valutick) > 1e3:
                #        labltick[k] = '%d' % valutick[k]
                #    else:
                #        labltick[k] = '%.3g' % valutick[k]
                setattr(gmodoutp.limtpara, strgvarb, limt)
                setattr(gmodoutp.binspara, strgvarb, bins)
                setattr(gmodoutp.meanpara, strgvarb, meanpara)
                setattr(gmodoutp.deltpara, strgvarb, delt)
def retr_ticklabltemp(gdat, strgcbar):
    # Compute color-bar tick positions for the variable strgcbar: place ticks
    # uniformly in the variable's scaled space (asnh/logt/linear), then map them
    # back to native values and store them on gdat.tickpara.
    minm = getattr(gdat.minmpara, strgcbar)
    maxm = getattr(gdat.maxmpara, strgcbar)
    scal = getattr(gdat.scalpara, strgcbar)
    numb = gdat.numbtickcbar - 1
    retr_axis(gdat, strgcbar, numb=numb)
    # transform the limits into the scaled space
    minmscal = minm
    if scal == 'asnh':
        minmscal = np.arcsinh(minmscal)
    if scal == 'logt':
        minmscal = np.log10(minmscal)
    maxmscal = maxm
    if scal == 'asnh':
        maxmscal = np.arcsinh(maxmscal)
    if scal == 'logt':
        maxmscal = np.log10(maxmscal)
    tickscal = np.linspace(minmscal, maxmscal, gdat.numbtickcbar)
    labl = np.empty(gdat.numbtickcbar, dtype=object)
    tick = np.copy(tickscal)
    for k in range(gdat.numbtickcbar):
        # map each uniformly spaced scaled tick back to a native value
        if scal == 'asnh':
            tick[k] = np.sinh(tickscal[k])
        elif scal == 'logt':
            tick[k] = 10**(tickscal[k])
        # avoid very small, but nonzero central values in the residual count color maps
        if strgcbar == 'cntpresi' and np.fabs(tick[k]) < 1e-5:
            tick[k] = 0.
        if strgcbar == 'cntpdata' and np.amax(tick) > 1e3:
            labl[k] = '%d' % tick[k]
        else:
            labl[k] = '%.3g' % tick[k]
    # NOTE(review): labl is computed above but never stored or returned -- this
    # looks like a dropped setattr on a tick-label container; confirm before use
    setattr(gdat.tickpara, strgcbar, tick)
def retr_axistemp(gdat, strgvarb, strgmodl=None, boolinvr=False):
    '''
    Compute the bin limits, edges, centers, and widths of the variable strgvarb on
    the relevant object(s), honoring the variable's scaling (self/pois/gaus, logt,
    or asnh). With strgmodl None, the axis is set on gdat and all models; with
    'fitt'/'true', only on that model; with 'allm', on all models.
    '''
    # collect the objects on which the axis should be defined
    if strgmodl is None:
        listgdattemp = [gdat]
        for strgmodl in gdat.liststrgmodl:
            listgdattemp.append(getattr(gdat, strgmodl))
    elif strgmodl == 'fitt' or strgmodl == 'true':
        listgdattemp = [getattr(gdat, strgmodl)]
    elif strgmodl == 'allm':
        listgdattemp = []
        for strgmodl in gdat.liststrgmodl:
            # BUGFIX: the original assigned (overwrote) the list on every iteration
            # instead of appending, leaving only a single non-list object
            listgdattemp.append(getattr(gdat, strgmodl))
    for gdattemp in listgdattemp:
        minm = getattr(gdattemp.minmpara, strgvarb)
        maxm = getattr(gdattemp.maxmpara, strgvarb)
        numb = getattr(gdattemp.numbbinspara, strgvarb)
        scal = getattr(gdattemp.scalpara, strgvarb)
        # bin uniformly in the scaled space
        if scal == 'self' or scal == 'pois' or scal == 'gaus':
            binsscal = np.linspace(minm, maxm, numb + 1)
        if scal == 'logt':
            if gdat.booldiagmode:
                if minm <= 0.:
                    raise Exception('logt scaling requires a positive minimum for %s' % strgvarb)
            binsscal = np.linspace(np.log10(minm), np.log10(maxm), numb + 1)
        if scal == 'asnh':
            binsscal = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numb + 1)
        if boolinvr:
            binsscal = binsscal[::-1]
        meanvarbscal = (binsscal[1:] + binsscal[:-1]) / 2.
        # map the uniformly spaced bins back to the native space
        if scal == 'self' or scal == 'pois' or scal == 'gaus':
            meanvarb = meanvarbscal
            bins = binsscal
        if scal == 'logt':
            meanvarb = 10**meanvarbscal
            bins = 10**binsscal
        if scal == 'asnh':
            meanvarb = np.sinh(meanvarbscal)
            bins = np.sinh(binsscal)
        delt = np.diff(bins)
        limt = np.array([np.amin(bins), np.amax(bins)])
        setattr(gdattemp.limtpara, strgvarb, limt)
        setattr(gdattemp.binspara, strgvarb, bins)
        setattr(gdattemp.meanpara, strgvarb, meanvarb)
        setattr(gdattemp.deltpara, strgvarb, delt)
def setp_varbcore(gdat, strgmodl, gdattemp, strgvarbtemp, valu):
    '''
    Install valu as attribute strgvarbtemp of gdattemp unless the attribute already
    exists with a non-None value (i.e., the user supplied a custom value), in which
    case the existing value is kept (and reported when verbose).
    '''
    # Replaces the original bare raise/bare except construct with an explicit
    # getattr default: missing attribute and attribute-set-to-None are treated alike.
    valutemp = getattr(gdattemp, strgvarbtemp, None)
    if valutemp is None:
        # not defined (or defined as None): install the provided value
        setattr(gdattemp, strgvarbtemp, valu)
    elif gdat.typeverb > 0:
        print('Received custom value for %s, %s: %s' % (strgvarbtemp, strgmodl, valutemp))
def intp_sinc(gdat, lgal, bgal):
    '''
    Sinc-interpolate the PSF template at the offset (lgal, bgal) using the
    precomputed PSF grids on gdat.
    '''
    scalfact = 4. * gdat.numbsidepsfn**2
    kernlgal = sinc(gdat.numbsidepsfn * (gdat.gridpsfnlgal + lgal) - gdat.gridpsfnlgal)
    kernbgal = sinc(gdat.numbsidepsfn * (gdat.gridpsfnbgal + bgal) - gdat.gridpsfnbgal)
    return scalfact * np.sum(gdat.temppsfn * kernlgal * kernbgal)
def retr_fluxbrgt(gdat, lgal, bgal, flux):
    '''
    Return the flux of the brightest element and the flux of its associated element;
    both default to [0.] when there are no elements (lgal empty).
    '''
    if lgal.size == 0:
        fluxbrgt = np.array([0.])
        fluxbrgtassc = np.array([0.])
    else:
        indxbrgt = np.argmax(flux)
        fluxbrgt = flux[indxbrgt]
        # BUGFIX: fluxbrgtassc was never assigned on this branch, so the return
        # raised a NameError whenever elements exist.
        # NOTE(review): the associated flux now defaults to the brightest flux
        # itself -- confirm the intended association semantics
        fluxbrgtassc = fluxbrgt
    return fluxbrgt, fluxbrgtassc
def init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot):
    '''
    Create a square figure and axis for a map plot, build the plot name from the
    energy bin, event type, and population being plotted, set the axis labels,
    and return (figure, axis, output path).
    '''
    figrsize = (gdat.sizeimag, gdat.sizeimag)
    figr, axis = plt.subplots(figsize=figrsize)
    nameplot = strgplot
    if gdat.numbener > 1:
        nameplot += 'en%02d' % gdat.indxenerincl[indxenerplot]
    # BUGFIX: the event-type suffix was gated on the number of energy bins
    # (numbener) instead of the number of event types
    if gdat.numbevtt > 1:
        if indxevttplot == -1:
            # -1 means all event types combined
            nameplot += 'evtA'
        else:
            nameplot += 'evt%d' % gdat.indxevttincl[indxevttplot]
    if gdat.fitt.numbpopl > 1:
        if indxpoplplot == -1:
            # -1 means all populations combined
            nameplot += 'popA'
        else:
            nameplot += 'pop%d' % indxpoplplot
    path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
    axis.set_xlabel(gdat.fitt.labltotlpara.lgalpop0)
    axis.set_ylabel(gdat.fitt.labltotlpara.bgalpop0)
    # title the count maps with the energy bin and event type strings
    titl = ''
    if indxenerplot is not None and gdat.numbener > 1 and strgplot.endswith('cnts'):
        titl = gdat.strgener[indxenerplot]
    if indxevttplot is not None and gdat.numbevtt > 1 and strgplot.endswith('cnts'):
        titl += ' ' + gdat.strgevtt[indxevttplot]
    axis.set_title(titl)
    return figr, axis, path
def draw_frambndr(gdat, axis):
    '''
    Clip the axis view to the outer (larger) of the model and data frame boundaries
    and mark the inner (smaller) boundary with dashed black lines on all four sides.
    '''
    outr = max(gdat.frambndrmodl, gdat.frambndrdata)
    innr = min(gdat.frambndrmodl, gdat.frambndrdata)
    axis.set_xlim([-outr, outr])
    axis.set_ylim([-outr, outr])
    for bndr in (innr, -innr):
        axis.axvline(bndr, ls='--', alpha=gdat.alphbndr, color='black')
        axis.axhline(bndr, ls='--', alpha=gdat.alphbndr, color='black')
def retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot=None, indxevttplot=-1, booltdim=False, imag=None):
    # Render the map array on the given axis: select the requested energy/event-type
    # slice, project to a 2D Cartesian image, rescale according to the color-bar
    # variable's scaling, and either create and return a new image (imag None) or
    # update the data of an existing image in place (returns None in that case).
    draw_frambndr(gdat, axis)
    # take the relevant energy and PSF bins
    # (maps is indexed as [ener, pixl, evtt] here, per the slicing below)
    if indxenerplot is not None:
        if indxevttplot == -1:
            # -1 means combine all event types by summing over the evtt axis
            maps = np.sum(maps[indxenerplot, ...], axis=1)
        else:
            maps = maps[indxenerplot, :, indxevttplot]
    # project the map to 2D
    if gdat.typepixl == 'heal':
        maps = tdpy.retr_cart(maps, indxpixlrofi=gdat.indxpixlrofi, numbsideinpt=gdat.numbsideheal, \
                                                    minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
                                                    minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata)
    if gdat.typepixl == 'cart':
        # reshape the flat pixel axis into a (numbsidecart, numbsidecart) grid,
        # scattering region-of-interest pixels into a full-frame buffer first
        shap = [gdat.numbsidecart] + list(maps.shape)
        shap[1] = gdat.numbsidecart
        shapflat = list(maps.shape)
        shapflat[0] = gdat.numbpixlfull
        mapstemp = np.zeros(shapflat)
        if maps.size == gdat.indxpixlrofi.size:
            mapstemp[gdat.indxpixlrofi, ...] = maps
        else:
            mapstemp[:, ...] = maps
        maps = mapstemp.reshape(shap).swapaxes(0, 1)
    # temp -- this is needed to bring the Fermi-LAT map to the right direction
    #maps = fliplr(maps)
    # rescale the map
    # NOTE(review): gmod is assigned here but not used below -- possibly vestigial
    if strgmodl is not None:
        gmod = getattr(gdat, strgmodl)
    else:
        gmod = gdat
    scal = getattr(gdat.scalpara, strgcbar)
    cmap = getattr(gdat.cmappara, strgcbar)
    vmin = getattr(gdat.minmpara, strgcbar)
    vmax = getattr(gdat.maxmpara, strgcbar)
    if scal == 'asnh':
        maps = np.arcsinh(maps)
    if scal == 'logt':
        maps = np.log10(maps)
    if imag is None:
        imag = axis.imshow(maps, cmap=cmap, origin='lower', extent=gdat.exttrofi, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=gdat.alphmaps)
        return imag
    else:
        # update an existing image in place; no return value in this branch
        imag.set_data(maps)
def make_cbar(gdat, axis, imag, strgvarb):
    '''
    Attach a color bar for the image imag to the given axis, using the major tick
    positions and labels precomputed for strgvarb, and return the color bar.
    '''
    # leftover debug prints removed
    valutickmajr = getattr(gdat.valutickmajrpara, strgvarb)
    labltickmajr = getattr(gdat.labltickmajrpara, strgvarb)
    cbar = plt.colorbar(imag, ax=axis, fraction=0.05, aspect=15)
    cbar.set_ticks(valutickmajr)
    cbar.set_ticklabels(labltickmajr)
    return cbar
def make_legdmaps(gdat, strgstat, strgmodl, axis, mosa=False, assc=False):
    # Build the legend for a map plot by scattering one invisible proxy marker per
    # legend entry (placed at 5x the maximum angular extent, presumably outside the
    # visible frame) for model elements, reference elements, and the fixed-dimensional
    # lens source/host objects, then attach the legend above the axis.
    gmod = getattr(gdat, strgmodl)
    # transdimensional elements
    if strgmodl == 'fitt' and (strgstat == 'pdfn' and gdat.boolcondcatl or strgstat == 'this') and gmod.numbparaelem > 0:
        for l in gmod.indxpopl:
            colr = retr_colr(gdat, strgstat, strgmodl, l)
            if strgstat == 'pdfn':
                labl = 'Condensed %s %s' % (gmod.legd, gmod.legdpopl[l])
            else:
                labl = 'Sample %s %s' % (gmod.legd, gmod.legdpopl[l])
            if not gmod.maxmpara.numbelem[l] == 0:
                axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
                                                                label=labl, marker=gmod.listelemmrkr[l], lw=gdat.mrkrlinewdth, color=colr)
    # reference elements: with assc, separate "hit" and "miss" entries; otherwise one entry
    for q in gdat.indxrefr:
        if not np.amax(gdat.refr.numbelem[q]) == 0:
            if assc:
                axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
                                     label=gdat.refr.lablhits[q], marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
                axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
                                     label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
            else:
                axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
                                     label=gdat.refr.lablelem[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
    # fixed-dimensional objects
    if strgmodl == 'fitt':
        if gmod.boollens:
            axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
                                                             label='%s Source' % gmod.lablmodl, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
        if gmod.typeemishost != 'none':
            axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
                                                               label='%s Host' % gmod.lablmodl, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
    # reference (true) source/host entries for mock data
    if gdat.typedata == 'mock':
        if gmod.boollens:
            axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
                                                                label='%s Source' % gdat.refr.labl, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
        if gmod.typeemishost != 'none':
            axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
                                                               label='%s Host' % gdat.refr.labl, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
    # pick the column count from the number of legend entries
    temphand, temp = axis.get_legend_handles_labels()
    numblabl = len(temp)
    if numblabl == 4:
        numbcols = 2
    else:
        numbcols = 3
    if mosa:
        axis.legend(bbox_to_anchor=[1., 1.15], loc='center', ncol=numbcols)
    else:
        axis.legend(bbox_to_anchor=[0.5, 1.15], loc='center', ncol=numbcols)
def supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot=-1, assc=False):
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
# associations with the reference elements
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] > 0:
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
reframpl = gdat.refr.dictelem[q][gdat.refr.nameparagenrelemampl[q]][0, :]
mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl, gdat.refr.nameparagenrelemampl[q])
lgal = np.copy(gdat.refr.dictelem[q]['lgal'][0, :])
bgal = np.copy(gdat.refr.dictelem[q]['bgal'][0, :])
numbelem = int(gdat.refr.numbelem[q])
if gdatmodi is not None and gmod.numbparaelem > 0 and assc:
### hit
indx = gdatmodi.this.indxelemrefrasschits[q][l]
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, label=gdat.refr.lablhits, \
marker=gdat.refrlistmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
### missed
indx = gdatmodi.this.indxelemrefrasscmiss[q][l]
else:
indx = np.arange(lgal.size)
if indx.size > 0:
axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.listlablmiss, marker=gdat.refr.listmrkrmiss[q], \
lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
sizexoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
sizeyoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
if 'etag' in gdat.refr.namepara.elem[q]:
for k in range(indx.size):
axis.text(gdat.anglfact * lgal[indx[k]] + sizexoff, gdat.anglfact * bgal[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], \
verticalalignment='center', horizontalalignment='center', \
color='red', fontsize=1)
# temp -- generalize this to input refrlgalhost vs.
if gdat.typedata == 'mock':
## host galaxy position
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
lgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost, gdat.anglfact * bgalhost, facecolor='none', alpha=0.7, \
label='%s Host %d' % (gdat.refr.labl, e), s=300, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.boollens:
## host galaxy Einstein radius
for e in gmod.indxsersfgrd:
truelgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
truebgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
truebeinhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * truelgalhost, \
gdat.anglfact * truebgalhost), \
gdat.anglfact * truebeinhost, \
edgecolor=gdat.refr.colr, facecolor='none', lw=gdat.mrkrlinewdth))
if gmod.boollens:
## source galaxy position
axis.scatter(gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.lgalsour], \
gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.bgalsour], \
facecolor='none', \
alpha=0.7, \
#alpha=gdat.alphelem, \
label='%s Source' % gdat.refr.labl, s=300, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
# model catalog
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
if gdatmodi is not None:
if gmod.numbparaelem > 0:
colr = retr_colr(gdat, strgstat, strgmodl, l)
mrkrsize = retr_mrkrsize(gdat, strgmodl, gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[gmod.nameparagenrelemampl[l]][l]], gmod.nameparagenrelemampl[l])
if 'lgal' in gdatmodi.this.indxparagenrfullelem:
lgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['lgal']]
bgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['bgal']]
else:
gang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['gang']]
aang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['aang']]
lgal, bgal = retr_lgalbgal(gang, aang)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, alpha=gdat.alphelem, label='Sample', marker=gmod.listelemmrkr[l], \
lw=gdat.mrkrlinewdth, color=colr)
## source
if gmod.boollens:
lgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.bgalsour]
axis.scatter(gdat.anglfact * lgalsour, gdat.anglfact * bgalsour, facecolor='none', \
alpha=gdat.alphelem, \
label='%s Source' % gmod.lablpara, s=300, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
## host
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e], facecolor='none', \
alpha=gdat.alphelem, \
label='%s Host' % gmod.lablpara, s=300, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.boollens:
beinhost = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e]), \
gdat.anglfact * beinhost, edgecolor=gmod.colr, facecolor='none', \
lw=gdat.mrkrlinewdth, ls='--'))
# temp
if strgstat == 'pdfn' and gdat.boolcondcatl and gmod.numbparaelem > 0:
lgal = np.zeros(gdat.numbprvlhigh)
bgal = np.zeros(gdat.numbprvlhigh)
ampl = np.zeros(gdat.numbprvlhigh)
cntr = 0
for r in gdat.indxstkscond:
if r in gdat.indxprvlhigh:
lgal[cntr] = gdat.dictglob['poststkscond'][r]['lgal'][0]
bgal[cntr] = gdat.dictglob['poststkscond'][r]['bgal'][0]
# temp -- this does not allow sources with different spectra to be assigned to the same stacked sample
ampl[cntr] = gdat.dictglob['poststkscond'][r][gmod.nameparagenrelemampl[l]][0]
cntr += 1
mrkrsize = retr_mrkrsize(gdat, strgmodl, ampl, gmod.nameparagenrelemampl[l])
colr = retr_colr(gdat, strgstat, strgmodl, l)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
label='Condensed', marker=gmod.listelemmrkr[l], color='black', lw=gdat.mrkrlinewdth)
for r in gdat.indxstkscond:
lgal = np.array([gdat.dictglob['liststkscond'][r]['lgal']])
bgal = np.array([gdat.dictglob['liststkscond'][r]['bgal']])
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
marker=gmod.listelemmrkr[l], color='black', alpha=0.1, lw=gdat.mrkrlinewdth)
def retr_colr(gdat, strgstat, strgmodl, indxpopl=None):
    """
    Return the plotting color for a metamodel or one of its populations.

    Parameters
    ----------
    gdat : global data object; must expose `refr` (for 'true') and the
        metamodel object named by strgmodl
    strgstat : state string ('this', 'pdfn', 'mlik', ...)
    strgmodl : metamodel string ('true' or 'fitt')
    indxpopl : optional population index; when None the model-wide color
        is returned instead of the per-population one
    """
    # resolve the metamodel object -- previously the 'fitt' branch referenced
    # an undefined local `gmod`, raising NameError (cf. retr_lpripowrdist,
    # which performs this lookup)
    gmod = getattr(gdat, strgmodl)
    if strgmodl == 'true':
        if indxpopl is None:
            colr = gdat.refr.colr
        else:
            colr = gdat.refr.colrelem[indxpopl]
    if strgmodl == 'fitt':
        if strgstat == 'this' or strgstat == 'pdfn':
            if indxpopl is None:
                colr = gmod.colr
            else:
                colr = gmod.colrelem[indxpopl]
        if strgstat == 'mlik':
            colr = 'r'
    return colr
def retr_levipost(listllik):
    """
    Estimate the log-evidence from a list of log-likelihood samples using
    the harmonic-mean estimator, shifting by the minimum log-likelihood
    before exponentiating for numerical stability.
    """
    shft = np.amin(listllik)
    harm = np.mean(1. / np.exp(listllik - shft))
    return np.log(harm) + shft
def retr_infofromlevi(pmeallik, levi):
    """Return the information estimate: posterior-mean log-likelihood minus the log-evidence."""
    return pmeallik - levi
def retr_jcbn():
    """
    Symbolically evaluate (via sympy) the determinant of the Jacobian of the
    split/merge parameter transformation, whose partial derivatives are worked
    out in the comment block following this function.
    """
    fluxpare, lgalpare, bgalpare, fluxauxi, lgalauxi, bgalauxi = \
                sympy.symbols('fluxpare lgalpare bgalpare fluxauxi lgalauxi bgalauxi')
    
    # rows: derivatives of the daughter parameters with respect to the
    # parent parameters and the auxiliary split variables
    rows = [ \
            [ fluxpare,     fluxauxi, 0,            0, 0,            0], \
            [-fluxpare, 1 - fluxauxi, 0,            0, 0,            0], \
            [-lgalauxi,            0, 1, 1 - fluxauxi, 0,            0], \
            [-lgalauxi,            0, 1,    -fluxauxi, 0,            0], \
            [-bgalauxi,            0, 0,            0, 1, 1 - fluxauxi], \
            [-bgalauxi,            0, 0,            0, 1,    -fluxauxi], \
           ]
    
    return sympy.Matrix(rows).det()
# f1 = uf f0
# f2 = (1 - uf) f0
# x1 = x0 + (1 - uf) ux
# x2 = x0 - uf ux
# y1 = y0 + (1 - uf) uy
# y2 = y0 - uf uy
# f1/uf f1/f0 f1/x0 f1/ux f1/y0 f1/uy
# f2/uf f2/f0 f2/x0 f2/ux f2/y0 f2/uy
# x1/uf x1/f0 x1/x0 x1/ux x1/y0 x1/uy
# x2/uf x2/f0 x2/x0 x2/ux x2/y0 x2/uy
# y1/uf y1/f0 y1/x0 y1/ux y1/y0 y1/uy
# y2/uf y2/f0 y2/x0 y2/ux y2/y0 y2/uy
# f0 uf 0 0 0 0
# -f0 1 - uf 0 0 0 0
# -ux 0 1 1 - uf 0 0
# -ux 0 1 -uf 0 0
# -uy 0 0 0 1 1 - uf
# -uy 0 0 0 1 -uf
# f0
#retr_jcbn()
def retr_angldist(gdat, lgalfrst, bgalfrst, lgalseco, bgalseco):
    """
    Angular separation between two sky positions.

    Uses healpy's great-circle distance on a HEALPix grid and the flat-sky
    Euclidean distance otherwise.
    """
    # temp -- heal does not work when the dimension of lgalfrst is 1
    if gdat.typepixl == 'heal':
        dirfrst = np.array([lgalfrst, bgalfrst])
        dirseco = np.array([lgalseco, bgalseco])
        angldist = hp.rotator.angdist(dirfrst, dirseco)
    else:
        # flat-sky approximation
        dlgal = lgalfrst - lgalseco
        dbgal = bgalfrst - bgalseco
        angldist = np.sqrt(dlgal**2 + dbgal**2)
    return angldist
def retr_deflextr(gdat, indxpixlelem, sher, sang):
    """
    Deflection field due to external shear at the requested pixels.

    The two shear components are gamma_1 = sher * cos(2*sang) and
    gamma_2 = sher * sin(2*sang); the deflection is their linear action on
    the pixel coordinates.  Returns an (numbpixl, 2) array.
    """
    factcosi = sher * np.cos(2. * sang)
    # bug fix: the sine component previously used np.cos as well (copy-paste),
    # which made the shear field independent of the intended sin(2*sang) term
    factsine = sher * np.sin(2. * sang)
    defllgal = factcosi * gdat.lgalgrid[indxpixlelem] + factsine * gdat.bgalgrid[indxpixlelem]
    deflbgal = factsine * gdat.lgalgrid[indxpixlelem] - factcosi * gdat.bgalgrid[indxpixlelem]
    return np.vstack((defllgal, deflbgal)).T
def readfile(path):
    """
    Read a gdat object previously written by writfile().

    The state is split across two files: <path>.p holds the pickled object
    and <path>.h5 holds its numpy-array attributes, which are read back and
    re-attached to the unpickled object.  For final/initial gdat snapshots,
    the interpolation objects stripped at write time are reconstructed.
    """
    print('Reading %s...' % path)
    
    # context managers guarantee both files are closed even if loading fails
    # (previously the handles leaked on exception)
    with open(path + '.p', 'rb') as filepick:
        gdattemptemp = pickle.load(filepick)
    with h5py.File(path + '.h5', 'r') as filearry:
        for attr in filearry:
            setattr(gdattemptemp, attr, filearry[attr][()])
    
    if 'gdatfinl' in path or 'gdatinit' in path:
        # rebuild the interpolators that writfile() deliberately skipped
        if hasattr(gdattemptemp, 'edis') and gdattemptemp.edis is not None and hasattr(gdattemptemp, 'binsener'):
            gdattemptemp.edisintp = sp.interpolate.interp1d(gdattemptemp.binsener, gdattemptemp.edis, fill_value='extrapolate')
        gdattemptemp.adisobjt = sp.interpolate.interp1d(gdattemptemp.redsintp, gdattemptemp.adisintp, fill_value='extrapolate')
        gdattemptemp.redsfromdlosobjt = sp.interpolate.interp1d(gdattemptemp.adisintp * gdattemptemp.redsintp, \
                                                        gdattemptemp.redsintp, fill_value='extrapolate')
    
    return gdattemptemp
def init_stat(gdat):
    """
    Construct the initial sampler state on gdat.fitt.this.

    Depending on gdat.inittype, the unit-interval parameter vector is
    initialized randomly ('rand'), from a previously saved state file
    ('reco'), or from the reference parameters ('refr'/'pert'), then
    user-specified overrides and the PSF initialization are applied and
    the vector is validated.  Mutates gdat.fitt in place; returns None.
    """
    # construct the initial state
    if gdat.typeverb > 0:
        print('Initializing the sampler state...')
        print('inittype')
        print(gdat.inittype)
    
    gmod = gdat.fitt
    
    ## initialization
    ### initialize the unit sample vector randomly
    gmod.this.paragenrunitfull = np.random.rand(gmod.numbparagenrfull)
    gmod.this.paragenrscalfull = np.empty(gmod.numbparagenrfull)
    
    ## impose user-specified initial state
    ### number of elements
    ## create dummy indxparagenrfullelem
    gmod.this.indxparagenrfullelem = None
    if gmod.numbparaelem > 0:
        if gdat.inittype == 'refr':
            for l in gmod.indxpopl:
                gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gmod.paragenrunitfull[gmod.indxpara.numbelem[l]]
        else:
            for l in gmod.indxpopl:
                if gmod.typemodltran == 'pois':
                    meanelemtemp = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, \
                                        gmod.this.indxparagenrfullelem)[gmod.indxpara.meanelem[l]]
                print('temp -- user input is not working for numbelem')
                #namevarb = 'numbelempop%d' % l
                #initvalu = getattr(gmod.init, namevarb)
                #if initvalu > gmod.maxmpara.numbelem[l] or initvalu < gmod.minmpara.numbelem[l]:
                #    raise Exception('Bad initial number of elements...')
                #gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = initvalu
                if gmod.typemodltran == 'pois':
                    gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = np.random.poisson(meanelemtemp)
                # round the drawn count and clip it into the allowed range
                gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = round(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]])
                gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
                                min(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.maxmpara.numbelem[l])
                gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
                                max(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.minmpara.numbelem[l])
                # NOTE(review): this assigns paragenrscalfull from itself, but the
                # array was created with np.empty above, so the copied value is
                # uninitialized -- probably paragenrunitfull was intended; verify
                gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]] = gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]]
    
    if gdat.booldiagmode:
        if gdat.typedata == 'mock' and gdat.inittype == 'refr':
            for l in gmod.indxpopl:
                if gmod.paragenrunitfull[gmod.indxpara.numbelem[l]] > gmod.maxmpara.numbelem[l]:
                    raise Exception('')
    
    if gmod.numbparaelem > 0:
        # occupancy list: one list of element indices per population
        gmod.this.indxelemfull = []
        for l in gmod.indxpopl:
            gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
        gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
    
    if gdat.inittype == 'reco':
        # initialize from a previously saved state file, if one exists
        if gdat.namerecostat is not None:
            strgcnfg = gdat.namerecostat
        else:
            strgcnfg = gdat.strgcnfg
        path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'
        if os.path.exists(path):
            boolinitreco = True
            thisfile = h5py.File(path, 'r')
            if gdat.typeverb > 0:
                print('Initializing from the state %s...' % path)
                print('Likelihood:')
                print(thisfile['lliktotl'][...])
            
            # find the number of populations provided
            maxmindxpopl = 0
            # NOTE(review): the outer loop variable l is unused, so the inner
            # scan is repeated 10 times with identical effect -- verify intent
            for l in range(10):
                for attr in thisfile:
                    if attr.startswith('lgalpop'):
                        # NOTE(review): this overwrites gmod.indxpopl (elsewhere an
                        # index collection) with a scalar -- verify intent
                        gmod.indxpopl = int(attr[7])
                        if gmod.indxpopl > maxmindxpopl:
                            maxmindxpopl = gmod.indxpopl
            numbpoplinpt = maxmindxpopl + 1
            
            if numbpoplinpt != gmod.numbpopl:
                print('State file and fitting metamodel have different number of populations.')
            
            # find the number of elements provided
            # NOTE(review): gmod.numbpoplinpt is not set anywhere visible; the
            # local numbpoplinpt computed above was probably intended -- verify
            cntr = np.zeros(gmod.numbpoplinpt, dtype=int)
            for attr in thisfile:
                if attr.startswith('lgalpop'):
                    gmod.indxpopl = int(attr[7])
                    # NOTE(review): the bare name indxpopl is undefined here;
                    # gmod.indxpopl was probably intended -- verify
                    cntr[indxpopl] += 1
            
            if gdat.typeverb > 0:
                print('Number of elements found:')
                print(cntr)
            
            for attr in thisfile:
                # NOTE(review): the loop target rebinds gmod.nameparagenrbase to
                # each item while iterating over it, clobbering the attribute
                for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):
                    if gmod.nameparagenrbase == attr:
                        if gmod.nameparagenrbase.startswith('numbelem'):
                            try:
                                # an explicit user argument overrides the state file
                                indxpopltemp = int(gmod.nameparagenrbase[-1])
                                initnumbelem = getattr(gdat, 'initnumbelempop%d' % indxpopltemp)
                                print('Initial condition for the number of elements conflicts with the state file. Defaulting to the argument...')
                            except:
                                initnumbelem = thisfile[attr][()]
                            gmod.this.paragenrunitfull[k] = initnumbelem
                        else:
                            # map the stored (scaled) value back to the unit interval
                            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', thisfile[attr][()], k)
                            if gmod.this.paragenrunitfull[k] == 0.:
                                print('Warning CDF is zero.')
                        if not np.isfinite(thisfile[attr][()]):
                            raise Exception('Retreived state parameter is not finite.')
                        if (gmod.numbparaelem == 0 or gmod.numbparaelem > 0 and not k in gmod.indxpara.numbelem) and \
                                        (not np.isfinite(gmod.this.paragenrunitfull[k]) or gmod.this.paragenrunitfull[k] < 0. or \
                                        gmod.this.paragenrunitfull[k] > 1.):
                            raise Exception('CDF of the retreived state parameter is bad.')
            if gmod.numbparaelem > 0:
                for l in gmod.indxpopl:
                    # NOTE(review): `maxm` is not defined locally, so assigning
                    # maxm.numbelem likely raises NameError when reached -- verify
                    maxm.numbelem = getattr(gdat.fitt.maxm, 'numbelempop%d' % l)
                    if gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] > maxm.numbelem:
                        gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = maxm.numbelem
                        if gdat.typeverb > 0:
                            print('Tapering off the element list...')
                gmod.this.indxelemfull = []
                for l in gmod.indxpopl:
                    gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
                if gdat.typeverb > 0:
                    print('gmod.this.paragenrunitfull[gmod.indxpara.numbelem]')
                    print(gmod.this.paragenrunitfull[gmod.indxpara.numbelem])
            gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
            gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
            if (gmod.this.paragenrunitfull == 0).all():
                raise Exception('Bad initialization.')
            if gmod.numbparaelem > 0 and gmod.this.indxparagenrfullelem is not None:
                # read per-element parameters back from the state file
                for nameparagenrelem in gmod.namepara.elem:
                    initcomp = [[] for l in gmod.indxpopl]
                    for l in gmod.indxpopl:
                        initcomp[l] = np.empty(len(gmod.this.indxelemfull[l]))
                        for k in range(len(gmod.this.indxelemfull[l])):
                            namefiel = '%spop%d%04d' % (nameparagenrelem, l, k)
                            for attr in thisfile:
                                if namefiel == attr:
                                    initcomp[l][k] = thisfile[namefiel][()]
                    setattr(gdat, 'init' + nameparagenrelem, initcomp)
                # NOTE(review): gdatmodi is not defined in this function --
                # verify where initcompfromstat expects it to come from
                initcompfromstat(gdat, gdatmodi, 'init')
            thisfile.close()
        else:
            boolinitreco = False
            if gdat.typeverb > 0:
                print('Could not find the state file, %s, to initialize the sampler.' % path)
    
    if gdat.inittype == 'refr':
        if gdat.typedata == 'inpt':
            for l in gmod.indxpopl:
                gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gdat.refr.numbelem[l]
        if gdat.typedata == 'mock':
            for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):
                # NOTE(review): `gmod.nameparagenrbase in gmod.nameparagenrbase`
                # and the np.where comparison below compare the loop variable
                # against itself; the true model's parameter names were probably
                # intended on one side -- verify
                if not (gdat.inittype == 'pert' and gmod.nameparagenrbase.startswith('numbelem')) and \
                                gmod.nameparagenrbase in gmod.nameparagenrbase:
                    gmod.indxpara.true = np.where(gmod.nameparagenrbase == gmod.nameparagenrbase)[0]
                    # NOTE(review): gmodstat is not defined in this function -- verify
                    gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmodstat.paragenrscalfull[gmod.indxpara.true], k)
            if gmod.numbparaelem > 0:
                gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
            if gdat.typeverb > 1:
                show_paragenrscalfull(gdat, gdatmodi)
            if gmod.this.indxparagenrfullelem is not None:
                print('Initializing elements from the reference element parameters...')
                show_paragenrscalfull(gdat, gdatmodi)
                gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
                show_paragenrscalfull(gdat, gdatmodi)
                initcompfromstat(gdat, gdatmodi, 'refr')
            gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
    
    ## impose user-specified individual initial values
    for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):
        if gmod.nameparagenrbase.startswith('numbelem'):
            continue
        if gdat.inittype == 'reco' or gdat.inittype == 'refr' or gdat.inittype == 'pert':
            try:
                getattr(gdat, 'init' + gmod.nameparagenrbase)
                print('Conflicting initial state arguments detected, init keyword takes precedence.')
            except:
                pass
        try:
            # NOTE(review): this unconditional raise makes the rest of the try
            # block unreachable, so user-specified per-parameter initial values
            # are never actually applied -- verify whether this is a temporary
            # disable or dead code
            raise Exception('')
            initvalu = getattr(gdat, 'init' + gmod.nameparagenrbase)
            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', initvalu, k)
            if gdat.typeverb > 0:
                print('Received initial condition for %s: %.3g' % (gmod.nameparagenrbase, initvalu))
        except:
            pass
    
    ## PSF
    if gdat.initpsfp is not None:
        print('Initializing the metamodel PSF from the provided initial state...')
        if gdat.initpsfp.size != gmod.indxpara.psfp.size:
            raise Exception('')
        for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):
            if k in gmod.indxpara.psfp:
                gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.initpsfp[k-gmod.indxpara.psfp[0]], k)
    
    if gdat.initpsfprefr:
        print('Initializing the metamodel PSF from the reference state...')
        for k, gmod.nameparagenrbase in enumerate(gmod.nameparagenrbase):
            if k in gmod.indxpara.psfp:
                gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmod.psfpexpr[k-gmod.indxpara.psfp[0]], k)
    
    # fall back to the random state when requested, or when 'reco' found no file
    if gdat.inittype == 'rand' or gdat.inittype == 'reco' and not boolinitreco:
        if gdat.typeverb > 0:
            print('Initializing from a random state...')
        gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
    
    if gmod.numbparaelem > 0:
        gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
    
    # check the initial unit sample vector for bad entries
    if gmod.numbparaelem > 0:
        # element counts are integers, not unit-interval values -- exclude them
        indxsampdiff = np.setdiff1d(gmod.indxparagenrfull, gmod.indxpara.numbelem)
        
        if np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])).any():
            raise Exception('')
        indxsampbaddlowr = np.where((gmod.this.paragenrunitfull[indxsampdiff] <= 0.) | np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])))[0]
        indxsampbadduppr = np.where(gmod.this.paragenrunitfull[indxsampdiff] >= 1.)[0]
        indxsampbaddlowr = indxsampdiff[indxsampbaddlowr]
        indxsampbadduppr = indxsampdiff[indxsampbadduppr]
    else:
        indxsampbaddlowr = np.where(gmod.this.paragenrunitfull <= 0.)[0]
        indxsampbadduppr = np.where(gmod.this.paragenrunitfull >= 1.)[0]
    
    indxsampbadd = np.concatenate((indxsampbaddlowr, indxsampbadduppr))
    if indxsampbadd.size > 0:
        print('Initial value caused unit sample vector to go outside the unit interval...')
        show_paragenrscalfull(gdat, gdatmodi, indxsampshow=indxsampbadd)
        gmod.this.paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)
        raise Exception('')
    
    gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
    indxbadd = np.where(np.logical_not(np.isfinite(gmod.this.paragenrscalfull)))[0]
    if indxbadd.size > 0:
        raise Exception('')
def writfile(gdattemp, path):
    """
    Write a gdat object to disk as a pickle (<path>.p) plus an HDF5 file
    (<path>.h5).

    Plain numeric numpy-array attributes go into the HDF5 file; everything
    else (except interpolation objects, which cannot be serialized) is
    collected on a fresh container and pickled.
    """
    gdattemptemp = tdpy.gdatstrt()
    # context managers guarantee both files are closed even if serialization
    # fails (previously the handles leaked on exception)
    with open(path + '.p', 'wb') as filepick, h5py.File(path + '.h5', 'w') as filearry:
        for attr, valu in gdattemp.__dict__.items():
            if attr.endswith('psfnintp'):
                continue
            if isinstance(valu, np.ndarray) and valu.dtype != np.dtype('O') and valu.dtype != np.dtype('<U4'):# or isinstance(valu, str) or \
            #isinstance(valu, float) or isinstance(valu, bool) or isinstance(valu, int) or isinstance(valu, np.float):
                filearry.create_dataset(attr, data=valu)
            else:
                # temp -- make sure interpolation objects are not written.
                if attr != 'adisobjt' and attr != 'redsfromdlosobjt' and attr != 'edisintp':
                    setattr(gdattemptemp, attr, valu)
        
        print('Writing to %s...' % path)
        
        pickle.dump(gdattemptemp, filepick, protocol=pickle.HIGHEST_PROTOCOL)
def retr_deflcutf(angl, defs, asca, acut, asym=False):
fracanglasca = angl / asca
deflcutf = defs / fracanglasca
# second term in the NFW deflection profile
fact = np.ones_like(fracanglasca)
indxlowr = np.where(fracanglasca < 1.)[0]
indxuppr = np.where(fracanglasca > 1.)[0]
fact[indxlowr] = np.arccosh(1. / fracanglasca[indxlowr]) / np.sqrt(1. - fracanglasca[indxlowr]**2)
fact[indxuppr] = np.arccos(1. / fracanglasca[indxuppr]) / np.sqrt(fracanglasca[indxuppr]**2 - 1.)
if asym:
deflcutf *= np.log(fracanglasca / 2.) + fact
else:
fracacutasca = acut / asca
factcutf = fracacutasca**2 / (fracacutasca**2 + 1)**2 * ((fracacutasca**2 + 1. + 2. * (fracanglasca**2 - 1.)) * fact + \
np.pi * fracacutasca + (fracacutasca**2 - 1.) * np.log(fracacutasca) + np.sqrt(fracanglasca**2 + fracacutasca**2) * (-np.pi + (fracacutasca**2 - 1.) / fracacutasca * \
np.log(fracanglasca / (np.sqrt(fracanglasca**2 + fracacutasca**2) + fracacutasca))))
deflcutf *= factcutf
return deflcutf
def initchro(gdat, gdatmodi, name):
    """Start the chronometer `name` by recording the current time on gdatmodi.this; no-op when gdatmodi is None."""
    if gdatmodi is None:
        return
    setattr(gdatmodi.this, 'chro' + name, gdat.functime())
def stopchro(gdat, gdatmodi, name):
    """Stop the chronometer `name`, replacing the stored start time with the elapsed time; no-op when gdatmodi is None."""
    if gdatmodi is None:
        return
    attrchro = 'chro' + name
    elapsed = gdat.functime() - getattr(gdatmodi.this, attrchro)
    setattr(gdatmodi.this, attrchro, elapsed)
def retr_defl(gdat, indxpixlelem, lgal, bgal, angllens, ellp=None, angl=None, rcor=None, asca=None, acut=None):
# translate the grid
lgaltran = gdat.lgalgrid[indxpixlelem] - lgal
bgaltran = gdat.bgalgrid[indxpixlelem] - bgal
if acut is not None:
defs = angllens
angl = np.sqrt(lgaltran**2 + bgaltran**2)
defl = retr_deflcutf(angl, defs, asca, acut)
defllgal = lgaltran / angl * defl
deflbgal = bgaltran / angl * defl
else:
bein = angllens
# rotate the grid
lgalrttr = np.cos(angl) * lgaltran - np.sin(angl) * bgaltran
bgalrttr = np.sin(angl) * lgaltran + np.cos(angl) * bgaltran
axisrati = 1. - ellp
facteccc = np.sqrt(1. - axisrati**2)
factrcor = np.sqrt(axisrati**2 * lgalrttr**2 + bgalrttr**2)
defllgalrttr = bein * axisrati / facteccc * np.arctan(facteccc * lgalrttr / factrcor)
deflbgalrttr = bein * axisrati / facteccc * np.arctanh(facteccc * bgalrttr / factrcor)
# totate back vector to original basis
defllgal = np.cos(angl) * defllgalrttr + np.sin(angl) * deflbgalrttr
deflbgal = -np.sin(angl) * defllgalrttr + np.cos(angl) * deflbgalrttr
defl = np.vstack((defllgal, deflbgal)).T
return defl
def retr_lpriselfdist(gdat, strgmodl, feat, strgfeat):
    """
    Log-prior of the feature values `feat` under a uniform ('self')
    distribution between the model's minimum and maximum for `strgfeat`.
    """
    # resolve the metamodel object -- `gmod` was previously referenced without
    # being defined (cf. retr_lpripowrdist, which performs this lookup)
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    lpri = np.sum(np.log(pdfn_self(feat, minm, maxm)))
    return lpri
def retr_lprilogtdist(gdat, strgmodl, feat, strgfeat):
    """
    Log-prior of the feature values `feat` under a log-uniform distribution
    between the model's minimum and maximum for `strgfeat`.
    """
    # resolve the metamodel object -- `gmod` was previously referenced without
    # being defined (cf. retr_lpripowrdist, which performs this lookup)
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    lpri = np.sum(np.log(pdfn_logt(feat, minm, maxm)))
    return lpri
def retr_lpripowrdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    """
    Log-prior of the feature values `feat` under a power-law distribution
    whose slope for population l is itself a (hyper)parameter of the model.
    """
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    # look up the population's slope hyperparameter in the full sample vector
    indxslop = getattr(gmod.indxpara, 'slopprio' + strgfeat + 'pop%d' % l)
    slop = paragenrscalfull[indxslop]
    return np.sum(np.log(pdfn_powr(feat, minm, maxm, slop)))
def retr_lpridpowdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    """
    Log-prior of the feature values `feat` under a double power-law
    distribution; the break and the two slopes for population l are
    (hyper)parameters of the model.
    """
    # resolve the metamodel object -- `gmod` was previously referenced without
    # being defined (cf. retr_lpripowrdist, which performs this lookup)
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
    maxm = getattr(gmod.maxmpara, strgfeat)
    brek = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distbrek')[l]]
    sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + strgfeat)[l]]
    slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + strgfeat)[l]]
    lpri = np.sum(np.log(pdfn_dpow(feat, minm, maxm, brek, sloplowr, slopuppr)))
    return lpri
def retr_lprigausdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    """
    Log-prior of the feature values `feat` under a Gaussian distribution
    whose mean and standard deviation for population l are
    (hyper)parameters of the model.
    """
    # resolve the metamodel object -- `gmod` was previously referenced without
    # being defined (cf. retr_lpripowrdist, which performs this lookup)
    gmod = getattr(gdat, strgmodl)
    distmean = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distmean')[l]]
    diststdv = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'diststdv')[l]]
    lpri = np.sum(np.log(pdfn_gaus(feat, distmean, diststdv)))
    return lpri
def retr_lpriigamdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    """
    Log-prior of the feature values `feat` under an inverse-gamma
    distribution; the slope for population l is a (hyper)parameter and the
    cutoff is a fixed model attribute.
    """
    # resolve the metamodel object -- `gmod` was previously referenced without
    # being defined (cf. retr_lpripowrdist, which performs this lookup)
    gmod = getattr(gdat, strgmodl)
    slop = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'slop')[l]]
    cutf = getattr(gmod, 'cutf' + strgfeat)
    lpri = np.sum(np.log(pdfn_igam(feat, slop, cutf)))
    return lpri
def traptdim(gdat, arry):
    """
    2-d trapezoidal integral of `arry` over the image grid, assuming a
    uniform pixel solid angle gdat.apix: corners weighted 1, edges 2,
    interior points 4.
    """
    corn = arry[0, 0] + arry[0, -1] + arry[-1, 0] + arry[-1, -1]
    edge = np.sum(arry[1:-1, 0]) + np.sum(arry[1:-1, -1]) + \
                                        np.sum(arry[0, 1:-1]) + np.sum(arry[-1, 1:-1])
    intr = np.sum(arry[1:-1, 1:-1])
    return (corn + 2 * edge + 4 * intr) * gdat.apix
def retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons=None):
    """
    Construct the log spatial prior PDF on the cartesian grid.

    The template is optionally offset by a constant, normalized to unit
    integral over the grid, and its log is returned together with a
    bivariate spline for evaluation at arbitrary positions.
    """
    # copy the template so the caller's array is not mutated by the in-place
    # += and /= operations below (previously this function aliased and
    # modified its input)
    pdfnspatprio = np.array(pdfnspatpriotemp, dtype=float)
    if spatdistcons is not None:
        pdfnspatprio += spatdistcons
    
    # normalize to unit integral over the grid
    summ = traptdim(gdat, pdfnspatprio)
    pdfnspatprio /= summ
    lpdfspatprio = np.log(pdfnspatprio)
    # spline over (bgal, lgal) for fast interpolation
    lpdfspatprioobjt = sp.interpolate.RectBivariateSpline(gdat.binspara.bgalcart, gdat.binspara.lgalcart, lpdfspatprio)
    
    return lpdfspatprio, lpdfspatprioobjt
def retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=False):
    """
    Return the object on which the model state is stored: gdat.true for the
    true metamodel, gdat.fitt for the fitting metamodel during
    initialization, and the chain-specific gdatmodi otherwise.
    """
    if strgmodl == 'true':
        return gdat.true
    if strgmodl == 'fitt' and boolinit:
        return gdat.fitt
    return gdatmodi
def proc_samp(gdat, gdatmodi, strgstat, strgmodl, fast=False, boolinit=False):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=boolinit)
gmodstat = getattr(gdatobjt, strgstat)
initchro(gdat, gdatmodi, 'pars')
# grab the sample vector
indxpara = np.arange(gmodstat.paragenrscalfull.size)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.paragenrscalfull).all():
raise Exception('')
if gmod.typeevalpsfn != 'none' and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
psfp = gmodstat.paragenrscalfull[gmod.indxpara.psfp]
if gdat.booldiagmode:
if np.where(psfp == 0)[0].size == psfp.size:
raise Exception('')
setattr(gmodstat, 'psfp', psfp)
bacp = gmodstat.paragenrscalfull[gmod.indxpara.bacp]
if gmod.numbparaelem > 0:
# temp -- this may slow down execution
gmodstat.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodstat.indxelemfull, strgmodl)
gmodstat.numbelem = np.empty(gmod.numbpopl, dtype=int)
indxelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
indxelem[l] = np.arange(gmodstat.numbelem[l])
gmodstat.numbelem[l] = np.sum(gmodstat.numbelem[l])
gmodstat.numbelemtotl = np.sum(gmodstat.numbelem)
gmodstat.dictelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.dictelem[l] = dict()
for strgfeat in gmod.namepara.genrelemdefa:
gmodstat.dictelem[l][strgfeat] = []
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem] = gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]
if gdat.booldiagmode:
if ((abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) < 1e-100 ) & (abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) > 0.)).any():
raise Exception('')
if gmodstat.numbelem[l] != len(gmodstat.dictelem[l][nameparagenrelem]):
print('l')
print(l)
print('numbelem')
print(numbelem)
print('gmodstat.dictelem')
print(gmodstat.dictelem)
print('nameparagenrelem')
print(nameparagenrelem)
raise Exception('')
if gdat.boolbinsener:
if gdat.typeverb > 2:
print('Calculating element spectra...')
initchro(gdat, gdatmodi, 'spec')
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], \
edisintp=gdat.edisintp, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'spec')
if gdat.typeverb > 2:
print('Element features:')
for l in gmod.indxpopl:
print('l')
print(l)
for strgfeat in gmod.namepara.genrelem[l]:
print(strgfeat)
print(gmodstat.dictelem[l][strgfeat])
if gdat.booldiagmode:
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if (gmod.listscalparagenrelem[l][g] != 'gaus' and not gmod.listscalparagenrelem[l][g].startswith('lnor')) and \
(gmod.listscalparagenrelem[l][g] != 'expo' and (gmodstat.dictelem[l][nameparagenrelem] < getattr(gmod.minmpara, nameparagenrelem)).any()) or \
(gmodstat.dictelem[l][nameparagenrelem] > getattr(gmod.maxmpara, nameparagenrelem)).any():
print('l, g')
print(l, g)
print('nameparagenrelem')
print(nameparagenrelem)
print('gmodstat.dictelem[l][nameparagenrelem]')
summgene(gmodstat.dictelem[l][nameparagenrelem])
print('getattr(gmod, minm + nameparagenrelem)')
print(getattr(gmod.minmpara, nameparagenrelem))
print('getattr(gmod, maxm + nameparagenrelem)')
print(getattr(gmod.maxmpara, nameparagenrelem))
print('gmod.listscalparagenrelem[l][g]')
print(gmod.listscalparagenrelem[l][g])
raise Exception('')
# calculate element spectra
# temp
if gdat.booldiagmode:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['acut']] < 0.)[0]
if indx.size > 0:
raise Exception('')
if gdat.variacut:
indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['asca']] < 0.)[0]
if indx.size > 0:
raise Exception('')
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
# evaluate horizontal and vertical position for elements whose position is a power law in image-centric radius
if gmod.typespatdist[l] == 'glc3':
gmodstat.dictelem[l]['dlos'], gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'] = retr_glc3(gmodstat.dictelem[l]['dglc'], \
gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'])
if gmod.typespatdist[l] == 'gangexpo':
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], = retr_lgalbgal(gmodstat.dictelem[l]['gang'], \
gmodstat.dictelem[l]['aang'])
if gdat.booldiagmode:
if gmodstat.numbelem[l] > 0:
if np.amin(gmodstat.dictelem[l]['lgal']) < gmod.minmlgal or \
np.amax(gmodstat.dictelem[l]['lgal']) > gmod.maxmlgal or \
np.amin(gmodstat.dictelem[l]['bgal']) < gmod.minmbgal or \
np.amax(gmodstat.dictelem[l]['bgal']) > gmod.maxmbgal:
raise Exception('Bad coordinates!')
if gmod.typespatdist[l] == 'los3':
gmodstat.dictelem[l]['dglc'], gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'] = retr_los3(gmodstat.dictelem[l]['dlos'], \
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
# evaluate flux for pulsars
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['lumi'] = retr_lumipuls(gmodstat.dictelem[l]['geff'], gmodstat.dictelem[l]['magf'], gmodstat.dictelem[l]['per0'])
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
gmodstat.dictelem[l]['lumi'] = gmodstat.dictelem[l]['lum0'] * (1. + gmodstat.dictelem[l]['reds'])**4
if gmod.typeelem[l] == 'lghtpntspuls' or gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['flux'] = retr_flux(gdat, gmodstat.dictelem[l]['lumi'], gmodstat.dictelem[l]['dlos'])
# evaluate spectra
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], edisintp=gdat.edisintp, spectype=gmod.spectype[l])
else:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'pars')
### loglikelihood
initchro(gdat, gdatmodi, 'modl')
if gmod.boollens:
lgalsour = gmodstat.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gmodstat.paragenrscalfull[gmod.indxpara.bgalsour]
if gdat.typeverb > 2:
print('Evaluating the likelihood...')
# process a sample vector and the occupancy list to calculate secondary variables
if gmod.boollens:
fluxsour = gmodstat.paragenrscalfull[gmod.indxpara.fluxsour]
if gdat.numbener > 1:
sindsour = gmodstat.paragenrscalfull[gmod.indxpara.sindsour]
sizesour = gmodstat.paragenrscalfull[gmod.indxpara.sizesour]
ellpsour = gmodstat.paragenrscalfull[gmod.indxpara.ellpsour]
anglsour = gmodstat.paragenrscalfull[gmod.indxpara.anglsour]
if gmod.typeemishost != 'none':
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
fluxhost = [[] for e in gmod.indxsersfgrd]
if gdat.numbener > 1:
sindhost = [[] for e in gmod.indxsersfgrd]
sizehost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % e)]
bgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % e)]
fluxhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'fluxhostisf%d' % e)]
if gdat.numbener > 1:
sindhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sindhostisf%d' % e)]
sizehost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sizehostisf%d' % e)]
if gmod.boollens:
beinhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
beinhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % e)]
if gmod.typeemishost != 'none':
ellphost = [[] for e in gmod.indxsersfgrd]
anglhost = [[] for e in gmod.indxsersfgrd]
serihost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
ellphost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'ellphostisf%d' % e)]
anglhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'anglhostisf%d' % e)]
serihost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'serihostisf%d' % e)]
if gmod.boollens:
numbpixltemp = gdat.numbpixlcart
defl = np.zeros((numbpixltemp, 2))
# determine the indices of the pixels over which element kernels will be evaluated
if gdat.boolbinsspat:
if gmod.numbparaelem > 0:
listindxpixlelem = [[] for l in gmod.indxpopl]
listindxpixlelemconc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
listindxpixlelem[l], listindxpixlelemconc[l] = retr_indxpixlelemconc(gdat, strgmodl, gmodstat.dictelem, l)
if gmod.boollens:
sherextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sherextr')]
sangextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sangextr')]
## host halo deflection
initchro(gdat, gdatmodi, 'deflhost')
deflhost = [[] for e in gmod.indxsersfgrd]
indxpixlmiss = gdat.indxpixlcart
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the deflection field due to host galaxy %d' % e)
print('lgalhost[e]')
print(lgalhost[e])
print('bgalhost[e]')
print(bgalhost[e])
print('beinhost[e]')
print(beinhost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
deflhost[e] = retr_defl(gdat, indxpixlmiss, lgalhost[e], bgalhost[e], beinhost[e], ellp=ellphost[e], angl=anglhost[e])
if gdat.booldiagmode:
indxpixltemp = slice(None)
setattr(gmodstat, 'deflhostisf%d' % e, deflhost[e])
if gdat.typeverb > 2:
print('deflhost[e]')
summgene(deflhost[e])
defl += deflhost[e]
if gdat.typeverb > 2:
print('After adding the host deflection...')
print('defl')
summgene(defl)
if gdat.booldiagmode:
if not np.isfinite(deflhost).all():
raise Exception('')
stopchro(gdat, gdatmodi, 'deflhost')
## external shear
initchro(gdat, gdatmodi, 'deflextr')
deflextr = []
indxpixltemp = gdat.indxpixlcart
deflextr = retr_deflextr(gdat, indxpixltemp, sherextr, sangextr)
defl += deflextr
if gdat.typeverb > 2:
print('After adding the external deflection...')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'deflextr')
# Boolean flag to indicate that the object to convolve the image will be needed
boolneedpsfnconv = gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full')
## Boolean flag to indicate that the object to convolve the image will be constructed
boolcalcpsfnconv = strgmodl == 'true' or boolinit or gdat.boolmodipsfn
# get the convolution object
if boolneedpsfnconv and boolcalcpsfnconv:
initchro(gdat, gdatmodi, 'psfnconv')
if gdat.typeverb > 2:
print('Evaluating the PSF convolution kernel...')
psfnconv = [[[] for i in gdat.indxener] for m in gdat.indxevtt]
if gdat.typepixl == 'cart':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
for mm, m in enumerate(gdat.indxevtt):
for ii, i in enumerate(gdat.indxener):
if gmod.typemodlpsfn == 'singgaus':
sigm = psfp[i+m*gdat.numbener]
else:
sigm = fwhm[i, m] / 2.355
gmodstat.psfnconv[mm][ii] = AiryDisk2DKernel(sigm / gdat.sizepixl)
stopchro(gdat, gdatmodi, 'psfnconv')
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and gmod.numbparaelem > 0:
if strgmodl == 'true' or boolinit or gdat.boolmodipsfn:
if gdat.typepixl == 'heal':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gdat.typepixl == 'cart':
if gdat.kernevaltype == 'ulip':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
if gdat.booldiagmode:
if not np.isfinite(gmodstat.psfnintp(0.05)).all():
raise Exception('')
if gdat.kernevaltype == 'bspx':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.anglcart.flatten(), gmod.typemodlpsfn, strgmodl)
# side length of the upsampled kernel
gdat.numbsidekernusam = 100
# side length of the original kernel
gdat.numbsidekern = gdat.numbsidekernusam / factkernusam
gdat.indxsidekern = np.arange(gdat.numbsidekern)
# pad by one row and one column
#psf = np.zeros((gdat.numbsidekernusam+1, gdat.numbsidekernusam+1))
#psf[0:gdat.numbsidekernusam, 0:gdat.numbsidekernusam] = psf0
# make design matrix for each factkernusam x factkernusam region
nx = factkernusam + 1
y, x = mgrid[0:nx, 0:nx] / float(factkernusam)
x = x.flatten()
y = y.flatten()
kernmatrdesi = np.array([full(nx*nx, 1), x, y, x*x, x*y, y*y, x*x*x, x*x*y, x*y*y, y*y*y]).T
# output np.array of coefficients
gmodstat.psfnintp = np.empty((gdat.numbsidekern, gdat.numbsidekern, kernmatrdesi.shape[1]))
# solve p = kernmatrdesi psfnintp for psfnintp
for iy in gdat.indxsidekern:
for ix in gdat.indxsidekern:
p = psf[iy*factkernusam:(iy+1)*factkernusam+1, ix*factkernusam:(ix+1)*factkernusam+1].flatten()
gmodstat.psfnintp[iy, ix, :] = dot(linalg.inv(dot(kernmatrdesi.T, kernmatrdesi)), dot(kernmatrdesi.T, p))
else:
gmodstat.psfnintp = gdat.fitt.this.psfnintp
sbrt = dict()
for name in gmod.listnamediff:
sbrt[name] = []
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = []
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = []
if gmod.boolelemdeflsubhanyy:
deflsubh = []
# retrieve or initialize state variable
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = np.zeros_like(gdat.expo)
if gmod.boolelemdeflsubhanyy:
deflsubh = np.zeros((gdat.numbpixl, 2))
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = np.zeros_like(gdat.expo)
# element kernel evaluation
if gmod.boolelemsbrtdfncanyy:
initchro(gdat, gdatmodi, 'elemsbrtdfnc')
sbrt['dfnc'] = []
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
for k in range(gmodstat.numbelem[l]):
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
if gmod.typeelem[l] == 'clusvari':
sbrtdfnc[0, listindxpixlelem[l][k], 0] += gmodstat.dictelem[l]['nobj'][k] / 2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[listindxpixlelem[l][k]])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[listindxpixlelem[l][k]])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
if gmod.boolelempsfn[l]:
print('sbrtdfnc')
summgene(sbrtdfnc)
sbrtdfnc[:, listindxpixlelem[l][k], :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
if gmod.typeelem[l].startswith('lghtline'):
sbrtdfnc[:, 0, 0] += gmodstat.dictelem[l]['spec'][:, k]
sbrt['dfnc'] = sbrtdfnc
if gdat.booldiagmode:
if not np.isfinite(sbrtdfnc).all():
raise Exception('Element delta function brightness not finite.')
setattr(gmodstat, 'sbrtdfnc', sbrt['dfnc'])
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['dfnc'])
numbelemtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
numbelemtemp += np.sum(gmodstat.numbelem[l])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtdfnc')
if gmod.boolelemdeflsubhanyy:
initchro(gdat, gdatmodi, 'elemdeflsubh')
if gdat.typeverb > 2:
print('Perturbing subhalo deflection field')
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
for kk, k in enumerate(indxelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
if gmod.typeelemspateval[l] == 'locl':
indxpixl = listindxpixlelem[l][kk]
else:
indxpixl = gdat.indxpixl
deflsubh[indxpixl, :] += retr_defl(gdat, indxpixl, \
gmodstat.dictelem[l]['lgal'][kk], gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['defs'][kk], \
asca=asca, acut=acut)
# temp -- find out what is causing the features in the element convergence maps
#for kk, k in enumerate(indxelem[l]):
# indxpixlpnts = retr_indxpixl(gdat, gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['lgal'][kk])
# if deflsubh[listindxpixlelem[l][kk], :]
if gdat.typeverb > 2:
print('deflsubh')
summgene(deflsubh)
setattr(gmodstat, 'deflsubh', deflsubh)
if gdat.booldiagmode:
if not np.isfinite(deflsubh).all():
raise Exception('Element deflection is not finite.')
defl += deflsubh
if gdat.typeverb > 2:
print('After adding subhalo deflection to the total deflection')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'elemdeflsubh')
if gmod.boolelemsbrtextsbgrdanyy:
initchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if strgstat == 'this':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtgausbgrd':
for k in range(gmodstat.numbelem[l]):
sbrtextsbgrd[:, listindxpixlelem[l][k], :] += gmodstat.dictelem[l]['spec'][:, k, None, None] / \
2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[None, listindxpixlelem[l][k], None])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[None, listindxpixlelem[l][k], None])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
setattr(gmodstat, 'sbrtextsbgrd', sbrtextsbgrd)
sbrt['extsbgrd'] = []
sbrt['extsbgrd'] = sbrtextsbgrd
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['extsbgrd'])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if gdat.typeverb > 2:
print('Element related state variables after perturbations...')
if gmod.boolelemsbrtdfncanyy:
print('sbrtdfnc')
summgene(sbrtdfnc)
if gmod.boolelemdeflsubhanyy:
print('deflsubh')
summgene(deflsubh)
if gmod.boolelemsbrtextsbgrdanyy:
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
if gmod.boollens:
# lensed surface brightness
initchro(gdat, gdatmodi, 'sbrtlens')
if gdat.typeverb > 2:
print('Evaluating lensed surface brightness...')
if strgstat == 'this' or gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
sbrt['bgrd'] = []
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
sbrt['bgrdgalx'] = []
if gdat.numbener > 1:
specsour = retr_spec(gdat, np.array([fluxsour]), sind=np.array([sindsour]))
if gdat.typeverb > 2:
print('sindsour')
print(sindsour)
else:
specsour = np.array([fluxsour])
if gdat.typeverb > 2:
print('lgalsour')
print(lgalsour)
print('bgalsour')
print(bgalsour)
print('sizesour')
print(sizesour)
print('ellpsour')
print(ellpsour)
print('anglsour')
print(anglsour)
print('fluxsour')
print(fluxsour)
print('specsour')
print(specsour)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.typeverb > 2:
print('Interpolating the background emission...')
sbrt['bgrdgalx'] = retr_sbrtsers(gdat, gdat.lgalgrid[indxpixlelem[0]], gdat.bgalgrid[indxpixlelem[0]], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
if gdat.typeverb > 2:
print('sbrt[bgrdgalx]')
summgene(sbrt['bgrdgalx'])
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
sbrt['bgrd'] = sbrt['bgrdgalx'] + sbrtextsbgrd
sbrt['lens'] = np.empty_like(gdat.cntpdata)
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
sbrtbgrdobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
sbrt['bgrd'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
bgalprim = gdat.bgalgrid[indxpixlelem[0]] - defl[indxpixlelem[0], 1]
lgalprim = gdat.lgalgrid[indxpixlelem[0]] - defl[indxpixlelem[0], 0]
# temp -- T?
sbrt['lens'][ii, :, m] = sbrtbgrdobjt(bgalprim, lgalprim, grid=False).flatten()
else:
if gdat.typeverb > 2:
print('Not interpolating the background emission...')
sbrt['lens'] = retr_sbrtsers(gdat, gdat.lgalgrid - defl[gdat.indxpixl, 0], \
gdat.bgalgrid - defl[gdat.indxpixl, 1], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
sbrt['bgrd'] = retr_sbrtsers(gdat, gdat.lgalgrid, \
gdat.bgalgrid, \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
setattr(gmodthis, 'sbrtlens', sbrt['lens'])
if gdat.booldiagmode:
if not np.isfinite(sbrt['lens']).all():
raise Exception('Lensed emission is not finite.')
if (sbrt['lens'] == 0).all():
raise Exception('Lensed emission is zero everynp.where.')
stopchro(gdat, gdatmodi, 'sbrtlens')
### background surface brightness
sbrtback = []
# temp
#sbrtback = np.empty((numbback, gdat.numbener, indxpixlelem[yy].size, gdat.numbevtt))
# evaluate host galaxy surface brightness
if gmod.typeemishost != 'none':
initchro(gdat, gdatmodi, 'sbrthost')
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the host galaxy surface brightness...')
if gdat.numbener > 1:
spechost = retr_spec(gdat, np.array([fluxhost[e]]), sind=np.array([sindhost[e]]))
else:
spechost = np.array([fluxhost[e]])
if gdat.typeverb > 2:
print('lgalhost[e]')
print(lgalhost[e] * gdat.anglfact)
print('bgalhost[e]')
print(bgalhost[e] * gdat.anglfact)
print('spechost')
print(spechost)
print('sizehost[e]')
print(sizehost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
print('serihost[e]')
print(serihost[e])
sbrt['hostisf%d' % e] = retr_sbrtsers(gdat, gdat.lgalgrid, gdat.bgalgrid, lgalhost[e], \
bgalhost[e], spechost, sizehost[e], ellphost[e], anglhost[e], serihost[e])
setattr(gmodstat, 'sbrthostisf%d' % e, sbrt['hostisf%d' % e])
#sbrthost = sbrt['host']
if gdat.typeverb > 2:
for e in gmod.indxsersfgrd:
print('e')
print(e)
print('sbrt[hostisf%d]')
summgene(sbrt['hostisf%d' % e])
stopchro(gdat, gdatmodi, 'sbrthost')
## model emission
initchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('Summing up the model emission...')
sbrt['modlraww'] = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))
for name in gmod.listnamediff:
if name.startswith('back'):
gmod.indxbacktemp = int(name[4:8])
if gdat.typepixl == 'heal' and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv') and not gmod.boolunifback[gmod.indxbacktemp]:
sbrttemp = getattr(gmod, 'sbrtbackhealfull')[gmod.indxbacktemp]
else:
sbrttemp = gmod.sbrtbacknorm[gmod.indxbacktemp]
if gmod.boolspecback[gmod.indxbacktemp]:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp]]
else:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp][gdat.indxener]][:, None, None]
sbrt['modlraww'] += sbrt[name]
if gdat.booldiagmode:
if np.amax(sbrttemp) == 0.:
raise Exception('')
if gdat.typeverb > 2:
print('name')
print(name)
print('sbrt[name]')
summgene(sbrt[name])
if gdat.typeverb > 2:
for ii, i in enumerate(gdat.indxener):
print('ii, i')
print(ii, i)
for mm, m in enumerate(gdat.indxevtt):
print('mm, m')
print(mm, m)
print('sbrt[modlraww][ii, :, mm]')
summgene(sbrt['modlraww'][ii, :, mm])
# convolve the model with the PSF
if gmod.convdiffanyy and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):
sbrt['modlconv'] = []
# temp -- isotropic background proposals are unnecessarily entering this clause
if gdat.typeverb > 2:
print('Convolving the model image with the PSF...')
sbrt['modlconv'] = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
if gdat.strgcnfg == 'pcat_ferm_igal_mock_test':
print('Convolving ii, i, mm, m')
print(ii, i, mm, m)
if gdat.typepixl == 'cart':
if gdat.numbpixl == gdat.numbpixlcart:
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrt['modlraww'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)), \
psfnconv[mm][ii]).flatten()
else:
sbrtfull = np.zeros(gdat.numbpixlcart)
sbrtfull[gdat.indxpixlrofi] = sbrt['modlraww'][ii, :, mm]
sbrtfull = sbrtfull.reshape((gdat.numbsidecart, gdat.numbsidecart))
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrtfull, psfnconv[mm][ii]).flatten()[gdat.indxpixlrofi]
indx = np.where(sbrt['modlconv'][ii, :, mm] < 1e-50)
sbrt['modlconv'][ii, indx, mm] = 1e-50
if gdat.typepixl == 'heal':
sbrt['modlconv'][ii, :, mm] = hp.smoothing(sbrt['modlraww'][ii, :, mm], fwhm=fwhm[i, m])[gdat.indxpixlrofi]
sbrt['modlconv'][ii, :, mm][np.where(sbrt['modlraww'][ii, :, mm] <= 1e-50)] = 1e-50
setattr(gmodstat, 'sbrtmodlconv', sbrt['modlconv'])
# temp -- this could be made faster -- need the copy() statement because sbrtdfnc gets added to sbrtmodl afterwards
sbrt['modl'] = np.copy(sbrt['modlconv'])
else:
if gdat.typeverb > 2:
print('Skipping PSF convolution of the model...')
sbrt['modl'] = np.copy(sbrt['modlraww'])
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
## add PSF-convolved delta functions to the model
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gdat.typeverb > 2:
print('Adding delta functions into the model...')
print('sbrt[dfnc]')
summgene(sbrt['dfnc'])
sbrt['modl'] += sbrt['dfnc']
stopchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
### count map
initchro(gdat, gdatmodi, 'expo')
cntp = dict()
cntp['modl'] = retr_cntp(gdat, sbrt['modl'])
if gdat.booldiagmode:
setattr(gmodstat, 'cntpmodl', cntp['modl'])
stopchro(gdat, gdatmodi, 'expo')
# mock data specific
if strgmodl == 'true' and strgstat == 'this':
# generate count data
cntptemp = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for i in gdat.indxener:
for j in gdat.indxpixl:
for m in gdat.indxevtt:
cntptemp[i, j, m] = np.random.poisson(cntp['modl'][i, j, m])
setattr(gdat, 'cntpdata', cntptemp)
if not gdat.boolsqzeexpo and np.amax(cntptemp) == 0:
print('cntp[modl]')
summgene(cntp['modl'])
print('gdat.boolsqzeexpo')
print(gdat.boolsqzeexpo)
print('cntptemp')
summgene(cntptemp)
raise Exception('Data is zero.')
proc_cntpdata(gdat)
## diagnostics
if gdat.booldiagmode:
frac = cntp['modl'] / np.mean(cntp['modl'])
if np.amin(frac) < -1e-3 and np.amin(cntp['modl']) < -0.1:
raise Exception('')
indxcubebadd = np.where(cntp['modl'] < 0.)[0]
if indxcubebadd.size > 0:
print('Warning! Model prediction is negative. Correcting to 1e-20...')
cntp['modl'][indxcubebadd] = 1e-20
stopchro(gdat, gdatmodi, 'modl')
# log-prior
initchro(gdat, gdatmodi, 'lpri')
if gdat.typeverb > 2:
print('Evaluating the prior...')
lpri = np.zeros(gmod.numblpri)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
lpri[0] -= 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[l] * gmodstat.numbelem[l]
if gdat.penalpridiff:
sbrtdatapnts = gdat.sbrtdata - sbrt['dfnc']
if gdat.typepixl == 'heal':
raise Exception('')
if gdat.typepixl == 'cart':
psecodimdatapnts = np.empty((gdat.numbener, gdat.numbsidecarthalf, gdat.numbevtt))
psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
sigm = fwhm / 2.355
psecodimdatapntsprio = np.exp(-2. * gdat.meanpara.mpolodim[None, :, None] / (0.1 / sigm[:, None, :]))
lpridiff = 0.
for i in gdat.indxener:
for m in gdat.indxevtt:
psecdatapnts = retr_psec(gdat, sbrtdatapnts[i, :, m])
psecodimdatapnts[i, :, m] = retr_psecodim(gdat, psecdatapnts)
psecodimdatapnts[i, :, m] /= psecodimdatapnts[i, 0, m]
lpridiff += -0.5 * np.sum((psecodimdatapnts[i, :, m] - psecodimdatapntsprio[i, :, m])**2)
setattr(gmodstat, 'psecodimdatapntsen%02devt%d' % (i, m), psecodimdatapnts[i, :, m])
setattr(gmodstat, 'psecodimdatapntsprioen%02devt%d'% (i, m), psecodimdatapntsprio[i, :, m])
lpri[1] = lpridiff
setattr(gmodstat, 'lpridiff', lpridiff)
if gmod.typemodltran == 'pois':
meanelem = gmodstat.paragenrscalfull[gmod.indxpara.meanelem]
for l in gmod.indxpopl:
lpri[2] += retr_lprbpois(gmodstat.numbelem[l], meanelem[l])
for l in gmod.indxpopl:
for g, (strgfeat, strgpdfn) in enumerate(zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l])):
indxlpritemp = 3 + l * gmod.numbparagenrelem + g
lpri[indxlpritemp] = retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, gmodstat.paragenrscalfull, gmodstat.dictelem, gmodstat.numbelem)
lpritotl = np.sum(lpri)
if gdat.typeverb > 1:
print('lpritotl')
print(lpritotl)
### log-likelihood
initchro(gdat, gdatmodi, 'llik')
llik = retr_llik(gdat, strgmodl, cntp['modl'])
if gdat.typeverb > 2:
print('cntp[modl]')
summgene(cntp['modl'])
print('np.sum(cntp[modl], (1, 2))')
print(np.sum(cntp['modl'], (1, 2)))
print('np.sum(gdat.cntpdata, (1, 2))')
print(np.sum(gdat.cntpdata, (1, 2)))
if gdat.booldiagmode:
if not np.isfinite(llik).all():
raise Exception('Likelihood is not finite.')
gmodstat.lliktotl = np.sum(llik)
if gdat.booldiagmode:
if isinstance(gmodstat.lliktotl, np.ndarray):
raise Exception('')
if not np.isfinite(gmodstat.lliktotl).all():
raise Exception('')
numbdoff = gdat.numbdata - gmod.numbparagenrbase
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
numbdoff -= len(gmodstat.indxparagenrfullelem[l]['full'])
setattr(gmodstat, 'llik', llik)
setattr(gmodstat, 'llikmean', gmodstat.lliktotl / gdat.numbdata)
setattr(gmodstat, 'llikcmea', gmodstat.lliktotl / (gdat.numbdata - numbdoff))
if gdat.typeverb > 2:
print('llik')
summgene(llik)
if gdat.typeverb > 1:
print('gmodstat.lliktotl')
print(gmodstat.lliktotl)
stopchro(gdat, gdatmodi, 'llik')
lpostotl = lpritotl + gmodstat.lliktotl
if gdat.typeverb > 1:
print('lpostotl')
print(lpostotl)
setattr(gmodstat, 'lpritotl', lpritotl)
setattr(gmodstat, 'gmodstat.lliktotl', gmodstat.lliktotl)
setattr(gmodstat, 'lpostotl', lpostotl)
stopchro(gdat, gdatmodi, 'lpri')
if strgstat == 'next':
return
initchro(gdat, gdatmodi, 'tert')
setattr(gmodstat, 'lpri', lpri)
if gmod.numbparaelem > 0:
setattr(gmodstat, 'lpripena', lpri[0])
dicttert = {}
## load necessary variables
## derived variables
## residual count map
cntp['resi'] = []
cntp['resi'] = gdat.cntpdata - cntp['modl']
setattr(gmodstat, 'cntpmodl', cntp['modl'])
setattr(gmodstat, 'cntpresi', cntp['resi'])
setattr(gmodstat, 'llik', llik)
#if gmod.boollens:
# setattr(gmodstat, 'deflhost', deflhost)
if gmod.boollens:
setattr(gmodstat, 'defl', defl)
for e in gmod.indxsersfgrd:
masshostbein = massfrombein * beinhost[e]**2
setattr(gmodstat, 'masshostisf%dbein' % e, masshostbein)
### sort with respect to deflection at scale radius
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
indxelemsortampl = np.argsort(gmodstat.dictelem[l][nameparaelemsort[l]])[::-1]
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem + 'sort'] = gmodstat.dictelem[l][nameparagenrelem][indxelemsortampl]
deflsing = np.zeros((gdat.numbpixlcart, 2, numbdeflsingplot))
conv = np.zeros((gdat.numbpixlcart))
convpsec = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecodim = np.zeros((gdat.numbsidecarthalf))
if gmod.numbparaelem > 0:
if boolelemlens:
gmod.indxpopllens = gmod.typeelem.index('lens')
numbdeflsing = 2
if gmod.numbparaelem > 0:
if boolelemlens:
if numbelem[indxpopllens] > 0:
numbdeflsing += min(numbdeflsubhplot, numbelem[indxpopllens])
numbdeflsing += 1
for k in range(numbdeflsing):
indxpixltemp = gdat.indxpixlcart
if k == 0:
# temp -- should take other sersics into account
deflsing[indxpixltemp, :, k] = deflhost[0]
elif k == 1:
deflsing[indxpixltemp, :, k] = deflextr
elif k == 2:
deflsing[indxpixltemp, :, k] = defl - deflextr - deflhost[0]
else:
asca = gmodstat.dictelem[indxpopllens]['ascasort'][None, k-3]
acut = gmodstat.dictelem[indxpopllens]['acutsort'][None, k-3]
deflsing[listindxpixlelem[indxpopllens][k], :, k] = retr_defl(gdat, listindxpixlelem[indxpopllens][k], \
gmodstat.dictelem[indxpopllens]['lgalsort'][None, k-3], gmodstat.dictelem[indxpopllens]['bgalsort'][None, k-3], \
gmodstat.dictelem[indxpopllens]['defssort'][None, k-3], asca=asca, acut=acut)
# convergence
## total
conv[:] = retr_conv(gdat, defl)
convhost = np.zeros((gmod.numbsersfgrd, gdat.numbpixlcart))
for e in gmod.indxsersfgrd:
convhost[e, :] = retr_conv(gdat, deflhost[e])
### power spectrum
#### two dimensional
convpsec[:] = retr_psec(gdat, conv[:])
#### one dimensional
convpsecodim[:] = retr_psecodim(gdat, convpsec[:])
setattr(gmodstat, 'convpsec', convpsec)
setattr(gmodstat, 'convpsecodim', convpsecodim)
setattr(gmodstat, 'conv', conv[...])
for e in gmod.indxsersfgrd:
setattr(gmodstat, 'convisf%d' % e, convhost[e, ...])
## subhalos
if gmod.numbparaelem > 0:
if boolelemlens:
convelem = np.zeros((gdat.numbpixl))
convpsecelem = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecelemodim = np.zeros((gdat.numbsidecarthalf))
### convergence
convelem[:] = retr_conv(gdat, deflsubh)
### power spectrum
##### two dimensional
convpsecelem[:] = retr_psec(gdat, convelem[:])
##### one dimensional
convpsecelemodim[:] = retr_psecodim(gdat, convpsecelem[:])
setattr(gmodstat, 'convpsecelem', convpsecelem)
setattr(gmodstat, 'convpsecelemodim', convpsecelemodim)
setattr(gmodstat, 'convelem', convelem[...])
setattr(gmodstat, 'defl', defl)
### magnification
magn = np.empty((gdat.numbpixlcart))
histdefl = np.empty((gdat.numbdefl))
if gmod.numbparaelem > 0 and boolelemlens:
histdeflsubh = np.empty((gdat.numbdefl))
deflsingmgtd = np.zeros((gdat.numbpixlcart, numbdeflsingplot))
magn[:] = 1. / retr_invm(gdat, defl)
histdefl[:] = np.histogram(defl, bins=gdat.binspara.defl)[0]
if gmod.numbparaelem > 0:
if boolelemlens:
histdeflsubh[:] = np.histogram(deflsubh, bins=gdat.binspara.deflsubh)[0]
deflsingmgtd[:, :] = np.sqrt(np.sum(deflsing[...]**2, axis=1))
if gmod.numbparaelem > 0:
if boolelemlens:
setattr(gmodstat, 'histdeflsubh', histdeflsubh)
setattr(gmodstat, 'histdefl', histdefl)
setattr(gmodstat, 'magn', magn[...])
setattr(gmodstat, 'deflsing', deflsing[...])
setattr(gmodstat, 'deflsingmgtd', deflsingmgtd[...])
## element related
if gmod.numbparaelem > 0:
if gdat.numbpixl == 1:
for l in gmod.indxpopl:
for k in range(gmodstat.numbelem[l]):
setattr(gmodstat, 'speclinepop%d%04d' % (l, k), gmodstat.dictelem[l]['spec'][:, k])
if gdat.typedata == 'mock' and strgmodl == 'true' and gdat.numbpixl > 1:
gdat.refrlgal = [[] for l in gmod.indxpopl]
gdat.refrbgal = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gdat.refrlgal[l] = np.tile(gmodstat.dictelem[l]['lgal'], [3] + list(np.ones(gmodstat.dictelem[l]['lgal'].ndim, dtype=int)))
gdat.refrbgal[l] = np.tile(gmodstat.dictelem[l]['bgal'], [3] + list(np.ones(gmodstat.dictelem[l]['bgal'].ndim, dtype=int)))
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['per1'] = retr_per1(gmodstat.dictelem[l]['per0'], gmodstat.dictelem[l]['magf'])
if gmod.numbparaelem > 0:
if strgstat == 'this' or gdat.boolrefeforc and strgmodl == 'fitt':
# correlate the fitting model elements with the reference elements
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
indxelemrefrasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] == 0:
continue
indxelemfittmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
indxelemrefrmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
matrdist = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]))
for k in range(gmodstat.numbelem[l]):
# construct a matrix of angular distances between reference and fitting elements
if gmod.typeelem[l].startswith('lghtline'):
matrdist[:, k] = abs(gdat.refrelin[q][0, :] - gmodstat.dictelem[l]['elin'][k]) / gdat.refrelin[q][0, :]
else:
matrdist[:, k] = retr_angldist(gdat, gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k])
indxelemrefrmatr[:, k] = np.arange(gdat.refr.numbelem[q])
indxelemfittmatr[:, k] = k
matrdist = matrdist.flatten()
indxelemrefrmatr = indxelemrefrmatr.flatten()
indxelemfittmatr = indxelemfittmatr.flatten()
# take only angular separations smaller than some threshold
indxmatrthrs = np.where(matrdist < gdat.anglassc)
matrdist = matrdist[indxmatrthrs]
indxelemrefrmatr = indxelemrefrmatr[indxmatrthrs]
indxelemfittmatr = indxelemfittmatr[indxmatrthrs]
# sort the remaining associations with respect to distance
indxmatrsort = np.argsort(matrdist)
matrdist = matrdist[indxmatrsort]
indxelemrefrmatr = indxelemrefrmatr[indxmatrsort]
indxelemfittmatr = indxelemfittmatr[indxmatrsort]
for c in range(matrdist.size):
if indxelemrefrmatr[c] in indxelemrefrasschits[q][l] or indxelemfittmatr[c] in indxelemfittasschits[q][l]:
continue
indxelemrefrasschits[q][l].append(indxelemrefrmatr[c])
indxelemfittasschits[q][l].append(indxelemfittmatr[c])
indxelemrefrasschits[q][l] = np.array(indxelemrefrasschits[q][l])
indxelemfittasschits[q][l] = np.array(indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasschits', indxelemrefrasschits)
setattr(gmodstat, 'indxelemfittasschits', indxelemfittasschits)
indxelemrefrasscmiss = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasscfals = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
# indices of the reference elements not associated with the fitting model elements
if gdat.refr.numbelem[q] > 0:
indxelemrefrasscmiss[q][l] = np.setdiff1d(np.arange(gdat.refr.numbelem[q]), indxelemrefrasschits[q][l])
# indices of the fitting model elements not associated with the reference elements
if gmodstat.numbelem[l] > 0:
indxelemfittasscfals[q][l] = np.setdiff1d(np.arange(gmodstat.numbelem[l]), indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasscmiss', indxelemrefrasscmiss)
setattr(gmodstat, 'indxelemfittasscfals', indxelemfittasscfals)
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for l in gmod.indxpopl:
# collect the associated reference element parameter for each fitting element
for strgfeat in gdat.refr.namepara.elemonly[q][l]:
name = strgfeat + gdat.listnamerefr[q]
if strgfeat != 'spec' and strgfeat != 'specplot':
refrfeat = getattr(gdat.refr, strgfeat)
gmodstat.dictelem[l][name] = np.zeros(gmodstat.numbelem[l])
if len(refrfeat[q]) > 0 and len(indxelemrefrasschits[q][l]) > 0:
gmodstat.dictelem[l][name][indxelemfittasschits[q][l]] = refrfeat[q][0, indxelemrefrasschits[q][l]]
print('temp')
continue
# collect the error in the associated reference element amplitude
for strgfeat in gdat.listnameparaetotlelemcomm[q][l]:
refrfeat = getattr(gdat.refr, strgfeat)
if strgfeat == gmod.nameparagenrelemampl[l] and len(indxelemfittasschits[q][l]) > 0:
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]] = np.zeros(gmodstat.numbelem[l])
fittfeattemp = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
refrfeattemp = refrfeat[q][0, indxelemrefrasschits[q][l]]
if gdat.booldiagmode:
if not np.isfinite(refrfeattemp).all():
raise Exception('')
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]][indxelemfittasschits[q][l]] = 100. * (fittfeattemp - refrfeattemp) / refrfeattemp
if gdat.boolrefeforc and strgmodl == 'fitt':
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat in gdat.refr.namepara.elem[gdat.indxrefrforc[l]]:
if len(indxelemrefrasschits[gdat.indxrefrforc[l]][l]) == 0:
continue
refrfeat = getattr(gdat.refr, strgfeat)[gdat.indxrefrforc[l]][0, indxelemrefrasschits[gdat.indxrefrforc[l]][l]]
if len(gmodstat.dictelem[l][strgfeat]) == 0:
continue
lpritotl += -2. * np.sum(1e6 * (gmodstat.dictelem[l][strgfeat][indxelemfittasschits[gdat.indxrefrforc[l]][l]] - refrfeat)**2 / refrfeat**2)
# other tertiary variables continues
## number of degrees of freedom
chi2doff = np.sum(cntp['resi']**2 / gdat.varidata) / numbdoff
if gdat.booldiagmode:
if not np.isfinite(cntp['resi']).all():
raise Exception('')
if not np.isfinite(numbdoff):
raise Exception('')
if not np.isfinite(chi2doff):
raise Exception('')
setattr(gmodstat, 'numbdoff', numbdoff)
setattr(gmodstat, 'chi2doff', chi2doff)
if gmod.boolelempsfn and gmod.numbparaelem > 0:
gmodstat.fwhmpsfn = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gmod.numbparaelem > 0:
### derived parameters
for l in gmod.indxpopl:
# luminosity
if gmod.boolelemlght[l] and 'flux' in gmod.namepara.genrelem[l]:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat.startswith('reds') and strgfeat != 'reds':
namerefr = strgfeat[-4:]
gmodstat.dictelem[l]['lumi' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
gmodstat.dictelem[l]['dlos' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
reds = gmodstat.dictelem[l]['reds' + namerefr]
indxgood = np.where(np.isfinite(gmodstat.dictelem[l]['reds' + namerefr]))[0]
if indxgood.size > 0:
# temp -- these units only work for energy units of keV
dlos = gdat.adisobjt(reds)
gmodstat.dictelem[l]['dlos' + namerefr][indxgood] = dlos
lumi = retr_lumi(gdat, gmodstat.dictelem[l]['flux'], dlos, reds)
gmodstat.dictelem[l]['lumi' + namerefr][indxgood] = lumi
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['mass'] = full([numbelem[l]], 3.)
if gdat.typeverb > 2:
print('l')
print(l)
if gdat.boolbinsspat:
#### radial and angular coordinates
gmodstat.dictelem[l]['gang'] = retr_gang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
gmodstat.dictelem[l]['aang'] = retr_aang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
if gmod.boolelemlght[l]:
#### number of expected counts
if gdat.boolbinsspat:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal']], gmodstat.dictelem[l]['spec'])
else:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['elin']], gmodstat.dictelem[l]['spec'])
#### delta log-likelihood
gmodstat.dictelem[l]['deltllik'] = np.zeros(gmodstat.numbelem[l])
if not (strgmodl == 'true' and gdat.checprio):
if gdat.typeverb > 2:
print('Calculating log-likelihood differences when removing elements from the model.')
for k in range(gmodstat.numbelem[l]):
# construct gdatmodi
gdatmoditemp = tdpy.gdatstrt()
gdatmoditemp.this = tdpy.gdatstrt()
gdatmoditemp.next = tdpy.gdatstrt()
gdatmoditemp.this.indxelemfull = gmodstat.indxelemfull
gdatmoditemp.this.paragenrscalfull = gmodstat.paragenrscalfull
gdatmoditemp.this.paragenrunitfull = gmodstat.paragenrunitfull
prop_stat(gdat, gdatmoditemp, strgmodl, deth=True, thisindxpopl=l, thisindxelem=k)
proc_samp(gdat, gdatmoditemp, 'next', strgmodl)#, boolinit=boolinit)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.lliktotl):
raise Exception('')
gdatobjttemp = retr_gdatobjt(gdat, gdatmoditemp, strgmodl)#, boolinit=boolinit)
nextlliktotl = gdatobjttemp.next.lliktotl
gmodstat.dictelem[l]['deltllik'][k] = gmodstat.lliktotl - nextlliktotl
if gdat.typeverb > 2:
print('deltllik calculation ended.')
# more derived parameters
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
### PSF FWHM
if gdat.typepixl == 'cart':
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
setattr(gmodstat, 'fwhm', fwhm)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gmod.numbparaelem > 0:
sbrt['dfnctotl'] = np.zeros_like(gdat.expo)
sbrt['dfncsubt'] = np.zeros_like(gdat.expo)
sbrt['dfncsupt'] = np.zeros_like(gdat.expo)
for l in gmod.indxpopl:
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'] = np.zeros_like(gdat.expo)
if gmod.boolelemsbrt[l]:
for k in range(gmodstat.numbelem[l]):
# read normalization from the element dictionary
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
# calculate imprint on the element surface brightness state variable
if gmod.boolelempsfn[l]:
sbrttemp = retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
indxpixltemp = listindxpixlelem[l][k]
if gmod.typeelem[l].startswith('lghtline'):
sbrttemp = gmodstat.dictelem[l]['spec'][:, k, None, None]
# add it to the state variable depending on the significance
sbrt['dfnctotl'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] > 35:
sbrt['dfncsupt'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] < 35:
sbrt['dfncsubt'][:, indxpixltemp, :] += sbrttemp
# calculate imprint without PSF truncation to calculate approximation errors
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'][:, :, :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, gdat.indxpixl)
setattr(gmodstat, 'sbrtdfncsubtpop%d' % l, sbrt['dfncsubt'])
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.booldiagmode:
numbtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtextsbgrd[l]:
numbtemp += np.sum(gmodstat.numbelem[l])
if numbtemp > 0 and (sbrtextsbgrd == 0.).all():
raise Exception('')
sbrt['bgrdexts'] = sbrtextsbgrd
#### count maps
cntp = dict()
for name in gmod.listnamegcom:
cntp[name] = retr_cntp(gdat, sbrt[name])
setattr(gmodstat, 'cntp' + name, cntp[name])
### spatial averages
sbrtmean = dict()
sbrtstdv = dict()
for name in gmod.listnamegcom:
sbrtmean[name], sbrtstdv[name] = retr_spatmean(gdat, sbrt[name])
for b in gdat.indxspatmean:
setattr(gmodstat, 'sbrt%smea%d' % (name, b), sbrtmean[name][b])
setattr(gmodstat, 'sbrt%sstd%d' % (name, b), sbrtstdv[name][b])
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
for i in gdat.indxener:
if 'dark' in gmod.listnamegcom:
fracsdenmeandarkdfncsubt = sbrtmean['dfncsubt'][0][0][i] / (sbrtmean['dfncsubt'][0][0][i] + sbrtmean['dark'][0][0][i])
else:
fracsdenmeandarkdfncsubt = 1.
setattr(gmodstat, 'fracsdenmeandarkdfncsubten%02d' % i, np.array([fracsdenmeandarkdfncsubt]))
if 'dark' in gmod.listnamegcom:
booldfncsubt = float(np.where(sbrtmean['dfncsubt'][0][0] > sbrtmean['dark'][0][0])[0].any())
else:
booldfncsubt = 1.
setattr(gmodstat, 'booldfncsubt', np.array([booldfncsubt]))
# find the 1-point function of the count maps of all emission components including the total emission
for name in gmod.listnamegcom:
namehistcntp = 'histcntp' + name
for m in gdat.indxevtt:
if gdat.numbevtt > 1:
namehistcntp += 'evt%d' % m
for i in gdat.indxener:
if gdat.numbener > 1:
namehistcntp += 'en%02d' % i
histcntp = np.histogram(cntp[name][i, :, m], bins=gdat.binspara.cntpmodl)[0]
setattr(gmodstat, namehistcntp, histcntp)
if False and i == 0 and m == 0 and (name == 'dfnc' or name == 'dfncsubt'):
for strgbins in ['lowr', 'higr']:
strgtemp = 'histcntp' + strgbins + name + 'en%02devt%d' % (i, m)
if strgbins == 'lowr':
setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[:gdat.numbtickcbar-1]))]))
else:
setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[gdat.numbtickcbar-1:]))]))
else:
histcntp = np.histogram(cntp[name][:, 0, m], bins=gdat.binspara.cntpmodl)[0]
setattr(gmodstat, 'histcntp' + name + 'evt%d' % m, histcntp)
if gmod.boollens:
if strgmodl == 'true':
s2nr = []
s2nr = cntp['lens'] / np.sqrt(cntp['modl'])
setattr(gmodstat, 's2nr', s2nr)
cntplensgrad = np.empty((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt, 2))
for i in gdat.indxener:
for m in gdat.indxevtt:
cntplenstemp = np.zeros(gdat.numbpixlcart)
cntplenstemp[gdat.indxpixlrofi] = cntp['lens'][i, :, m]
cntplensgrad[i, :, m, :] = retr_gradmaps(gdat, cntplenstemp) * gdat.sizepixl
cntplensgradmgtd = np.sqrt(np.sum(cntplensgrad**2, axis=3))
cntplensgrad *= gdat.sizepixl
indx = np.where(np.fabs(cntplensgrad) > 1. * gdat.sizepixl)
cntplensgrad[indx] = np.sign(cntplensgrad[indx]) * 1. * gdat.sizepixl
deflmgtd = np.sqrt(np.sum(defl**2, axis=1))
setattr(gmodstat, 'deflmgtd', deflmgtd)
setattr(gmodstat, 'cntplensgrad', cntplensgrad)
setattr(gmodstat, 'cntplensgradmgtd', cntplensgradmgtd)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
#### spectra
if gdat.boolbinsspat:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['specplot'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], \
curv=gmodstat.dictelem[l]['curv'], expc=gmodstat.dictelem[l]['expc'], \
sindcolr=sindcolr, spectype=gmod.spectype[l], plot=True)
if gdat.typedata == 'inpt':
if gdat.typeexpr == 'ferm':
# temp
try:
gmodstat.dictelem[l]['sbrt0018'] = gdat.sbrt0018objt(gmodstat.dictelem[l]['bgal'], gmodstat.dictelem[l]['lgal'])
except:
gmodstat.dictelem[l]['sbrt0018'] = gmodstat.dictelem[l]['bgal'] * 0.
if gmod.typeelem[l] == 'lens':
#### distance to the source
if gmod.boollens:
gmodstat.dictelem[l]['diss'] = retr_angldist(gdat, gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], lgalsour, bgalsour)
if gmod.boollenssubh:
gmodstat.dictelem[l]['deflprof'] = np.empty((gdat.numbanglfull, gmodstat.numbelem[l]))
gmodstat.dictelem[l]['mcut'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['rele'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reln'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relk'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relf'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reld'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relc'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relm'] = np.empty(gmodstat.numbelem[l])
# temp -- this can be placed earlier in the code
cntplensobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
cntp['lens'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
for k in np.arange(gmodstat.numbelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
#### deflection profiles
gmodstat.dictelem[l]['deflprof'][:, k] = retr_deflcutf(gdat.meanpara.anglfull, gmodstat.dictelem[l]['defs'][k], asca, acut)
### truncated mass
gmodstat.dictelem[l]['mcut'][k] = retr_mcut(gdat, gmodstat.dictelem[l]['defs'][k], asca, acut, adishost, mdencrit)
#### dot product with the source flux gradient
# temp -- weigh the energy and PSF bins
gmodstat.dictelem[l]['rele'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relf'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, cntpmodl=cntp['modl'][0, :, 0])
deflelem = retr_defl(gdat, gdat.indxpixl, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], gmodstat.dictelem[l]['defs'][k], asca=asca, acut=acut)
bgalprim = gdat.bgalgrid - deflelem[:, 1]
lgalprim = gdat.lgalgrid - deflelem[:, 0]
gmodstat.dictelem[l]['relm'][k] = np.mean(abs(cntp['lens'][0, :, 0] - cntplensobjt(bgalprim, lgalprim, grid=False).flatten()))
gmodstat.dictelem[l]['relk'][k] = gmodstat.dictelem[l]['relm'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reln'][k] = gmodstat.dictelem[l]['rele'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reld'][k] = retr_rele(gdat, gdat.cntpdata[0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relc'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, absv=False) / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
### distribution of element parameters and features
#### calculate the model filter
listindxelemfilt = [[[] for l in gmod.indxpopl] for namefilt in gdat.listnamefilt]
for k, namefilt in enumerate(gdat.listnamefilt):
for l in gmod.indxpopl:
if namefilt == '':
listindxelemfilt[k][l] = np.arange(gmodstat.numbelem[l])
if namefilt == 'imagbndr':
listindxelemfilt[k][l] = np.where((np.fabs(gmodstat.dictelem[l]['lgal']) < gdat.maxmgangdata) & (np.fabs(gmodstat.dictelem[l]['bgal']) < gdat.maxmgangdata))[0]
if namefilt == 'deltllik':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['deltllik'] > 0.5 * gmod.numbparagenrelemsing[l])[0]
if namefilt == 'nrel':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['reln'] > 0.3)[0]
for l in gmod.indxpopl:
# histograms of element parameters
for namefrst in gmod.namepara.elem[l]:
## one dimensional
if namefrst[:-4] == 'etag':
continue
if namefrst == 'specplot' or namefrst == 'deflprof':
continue
elif namefrst == 'spec':
histfrst = np.zeros((gdat.numbbinsplot, gdat.numbener))
for i in gdat.indxener:
histfrst[:, i] = np.histogram(gmodstat.dictelem[l]['spec'][i, listindxelemfilt[0][l]], gdat.binspara.spec)[0]
elif namefrst == 'cnts':
histfrst = np.histogram(gmodstat.dictelem[l]['cnts'][listindxelemfilt[0][l]], gdat.binspara.cnts)[0]
else:
#elif not (namefrst == 'curv' and gmod.spectype[l] != 'curv' or namefrst == 'expc' \
# and gmod.spectype[l] != 'expc' or namefrst.startswith('sindarry') and \
# gmod.spectype[l] != 'colr'):
binsfrst = getattr(gdat.binspara, namefrst)
#if len(gmodstat.dictelem[l][namefrst]) > 0 and len(listindxelemfilt[0][l]) > 0:
histfrst = np.histogram(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], binsfrst)[0]
strgvarb = 'hist' + namefrst + 'pop%d' % l
setattr(gmodstat, strgvarb, histfrst)
#### two dimensional
for nameseco in gmod.namepara.elem[l]:
if namefrst == 'spec' or namefrst == 'specplot' or namefrst == 'deflprof' or \
nameseco == 'spec' or nameseco == 'specplot' or nameseco == 'deflprof':
continue
if not checstrgfeat(namefrst, nameseco):
continue
binsseco = getattr(gdat.binspara, nameseco)
histtdim = np.histogram2d(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], \
gmodstat.dictelem[l][nameseco][listindxelemfilt[0][l]], [binsfrst, binsseco])[0]
setattr(gmodstat, 'hist' + namefrst + nameseco + 'pop%d' % l, histtdim)
### priors on element parameters and features
for nameparagenrelem in gmod.namepara.genrelem[l]:
xdat = gmodstat.dictelem[l][nameparagenrelem]
minm = getattr(gmod.minmpara, nameparagenrelem + 'pop%d' % l)
maxm = getattr(gmod.maxmpara, nameparagenrelem + 'pop%d' % l)
scal = getattr(gmod.scalpara, nameparagenrelem + 'pop%d' % l)
booltemp = False
if scal.startswith('expo') or scal.startswith('dexp'):
if scal.startswith('expo'):
if scal == 'expo':
sexp = getattr(gmod, 'gangdistsexppop%d' % l)
else:
sexp = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
pdfn = pdfn_expo(xdat, maxm, sexp)
if scal.startswith('dexp'):
pdfn = pdfn_dnp.exp(xdat, maxm, scal)
booltemp = True
if scal.startswith('self') or scal.startswith('logt'):
if scal.startswith('self'):
pdfn = 1. / (maxm - minm) + np.zeros_like(xdat)
else:
pdfn = 1. / (np.log(maxm) - np.log(minm)) + np.zeros_like(xdat)
booltemp = True
# temp
if scal.startswith('powr'):
slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem + 'pop%d' % l)]
pdfn = pdfn_powr(xdat, minm, maxm, slop)
booltemp = True
if scal.startswith('dpowslopbrek'):
pdfn = pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr)
booltemp = True
if scal == 'lnormeanstdv':
pdfn = pdfn_lnor(xdat, meanlnor, stdvlnor)
booltemp = True
if scal.startswith('igam'):
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
pdfn = pdfn_igam(xdat, slop, cutf)
booltemp = True
if scal.startswith('gaus'):
# this does not work for mismodeling
meanvarb = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
stdv = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
if nameparagenrelem == 'expc' and gmod.spectype[l] == 'expc':
pdfn = pdfn_gaus(xdat, meanvarb, stdv)
else:
pdfn = pdfn_gaus(xdat, meanvarb, stdv)
booltemp = True
# temp -- meanelem will not be defined
#if booltemp:
# gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'] = gmodstat.numbelem[l] * pdfn * np.interp(xdat, xdatplot, delt)
#setattr(gmodstat, 'hist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
#if strgmodl == 'true':
# setattr(gmodstat, 'refrhist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gmodstat.numbelem[l] > 0:
## total truncated mass of the subhalo as a cross check
# temp -- generalize
asca = gmodstat.dictelem[l]['asca']
acut = gmodstat.dictelem[l]['acut']
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
masssubh = np.array([np.sum(factmcutfromdefs * gmodstat.dictelem[l]['defs'])])
## derived variables as a function of other derived variables
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpntspuls'):
massshel = np.empty(gdat.numbanglhalf)
for k in gdat.indxanglhalf:
indxelemshel = np.where((gdat.binspara.anglhalf[k] < gmodstat.dictelem[l]['gang']) & (gmodstat.dictelem[l]['gang'] < gdat.binspara.anglhalf[k+1]))
massshel[k] = np.sum(gmodstat.dictelem[l]['mass'][indxelemshel])
setattr(gmodstat, 'massshelpop%d' % l, massshel)
if gmod.boollens or gmod.numbparaelem > 0 and gmod.boollenssubh:
# find the host, subhalo masses and subhalo mass fraction as a function of halo-centric radius
listnametemp = gdat.liststrgcalcmasssubh
listnamevarbmass = []
listnamevarbmassscal = []
listnamevarbmassvect = []
for e in gmod.indxsersfgrd:
if boolllenshost:
listnamevarbmassscal += ['masshosttotl']
for strgtemp in listnametemp:
listnamevarbmassvect.append('masshostisf%d' % e + strgtemp)
listnamevarbmassscal.append('masshostisf%d' % e + strgtemp + 'bein')
if gmod.numbparaelem > 0 and gmod.boollenssubh:
listnamevarbmassscal.append('masssubhtotl')
listnamevarbmassscal.append('fracsubhtotl')
for strgtemp in listnametemp:
listnamevarbmassvect.append('masssubh' + strgtemp)
listnamevarbmassvect.append('fracsubh' + strgtemp)
listnamevarbmassscal.append('masssubh' + strgtemp + 'bein')
listnamevarbmassscal.append('fracsubh' + strgtemp + 'bein')
for name in listnamevarbmassvect:
dicttert[name] = np.zeros(gdat.numbanglhalf)
if 'isf' in name:
indxisfrtemp = int(name.split('isf')[1][0])
angl = np.sqrt((gdat.meanpara.lgalcartmesh - lgalhost[indxisfrtemp])**2 + (gdat.meanpara.bgalcartmesh - bgalhost[indxisfrtemp])**2).flatten()
for k in gdat.indxanglhalf:
if name[4:8] == 'host':
convtemp = conv[:]
if name[4:8] == 'subh':
convtemp = convelem[:]
if name.endswith('delt'):
indxpixl = np.where((gdat.binspara.anglhalf[k] < angl) & (angl < gdat.binspara.anglhalf[k+1]))[0]
dicttert[name][k] = 1e6 * np.sum(convtemp[indxpixl]) * mdencrit * \
gdat.apix * adishost**2 / 2. / np.pi * gdat.deltanglhalf[k] / gdat.meanpara.anglhalf[k]
if name.endswith('intg'):
indxpixl = np.where(angl < gdat.meanpara.anglhalf[k])[0]
dicttert[name][k] = np.sum(convtemp[indxpixl]) * mdencrit * gdat.apix * adishost**2
if name[:4] == 'frac':
masshosttotl = 0.
for e in gmod.indxsersfgrd:
masshosttotl += dicttert['masshostisf%d' % e + name[-4:]][k]
if masshosttotl != 0.:
dicttert['fracsubh' + name[8:]][k] = dicttert['masssubh' + name[8:]][k] / masshosttotl
setattr(gmodstat, name, dicttert[name])
# interpolate the host, subhalo masses and subhalo mass fraction at the Einstein radius and save it as a scalar variable
dicttert[name + 'bein'] = np.interp(beinhost, gdat.meanpara.anglhalf, dicttert[name])
setattr(gmodstat, name + 'bein', dicttert[name + 'bein'])
#if gmod.numbparaelem > 0:
# ## copy element parameters to the global object
# feat = [[] for l in gmod.indxpopl]
# for l in gmod.indxpopl:
# feat[l] = dict()
# for strgfeat in gmod.namepara.genrelem[l]:
# if strgfeat[:-4] == 'etag':
# continue
# if len(gmodstat.dictelem[l][strgfeat]) > 0:
# if strgmodl == 'true':
# shap = list(np.ones(gmodstat.dictelem[l][strgfeat].ndim, dtype=int))
# feat[l][strgfeat] = np.tile(gmodstat.dictelem[l][strgfeat], [3] + shap)
# if strgmodl == 'fitt':
# feat[l][strgfeat] = gmodstat.dictelem[l][strgfeat]
#
# #for strgfeat in gmod.namepara.elem:
# # feattemp = [[] for l in gmod.indxpopl]
# # for l in gmod.indxpopl:
# # if strgfeat in gmod.namepara.genrelem[l]:
# # if strgfeat in feat[l]:
# # feattemp[l] = feat[l][strgfeat]
# # else:
# # feattemp[l] = np.array([])
# # setattr(gmodstat, strgfeat, feattemp)
# copy true state to the reference state
#if strgmodl == 'true':
# for name, valu in deepcopy(gdat.__dict__).items():
# if name.startswith('true'):
# #indx = name.find('pop')
# #if indx != -1 and not name.endswith('pop') and name[indx+3].isdigit():
# # namerefr = name.replace('pop%s' % name[indx+3], 'ref%s' % name[indx+3])
# #else:
# # namerefr = name
# #namerefr = name
# #namerefr = namerefr.replace('true', 'refr')
# name = name.replace('true', 'refr')
# setattr(gdat, name, valu)
if gmod.numbparaelem > 0 and gdat.priofactdoff != 0.:
if strgmodl == 'true':
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':
continue
reca = np.zeros(gdat.numbbinsplot) - 1.
indxelempars = np.where(gmodstat.dictelem[q]['deltllik'] > 2.5)[0]
refrhistpars = np.zeros(gdat.numbbinsplot) - 1.
histparaelem = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
indxrefrgood = np.where(histparaelem > 0)[0]
reca[indxrefrgood] = 0.
refrhistpars[indxrefrgood] = 0.
refrhist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
bins = getattr(gdat.binspara, strgfeat)
if len(indxelempars) > 0:
refrhistpars = np.histogram(gmodstat.dictelem[q][strgfeat][indxelempars], bins=bins)[0].astype(float)
if indxrefrgood.size > 0:
reca[indxrefrgood] = refrhistpars[indxrefrgood] / refrhist[indxrefrgood]
setattr(gmodstat, 'histpars' + strgfeat + 'pop%d' % q, refrhistpars)
setattr(gmodstat, 'reca' + strgfeat + 'pop%d' % q, reca)
print('gdat.rtagmock')
print(gdat.rtagmock)
if gdat.rtagmock is not None:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':# or strgfeat.startswith('aerr'):
continue
if strgfeat in gmod.namepara.genrelem[l]:
hist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % l)
reca = getattr(gdat.true.this, 'reca' + strgfeat + 'pop%d' % l)
histcorrreca = hist / reca
setattr(gmodstat, 'histcorrreca' + strgfeat + 'pop%d' % l, histcorrreca)
### Exculusive comparison with the true state
if strgmodl == 'fitt' and gdat.typedata == 'mock':
if gmod.boollens:
numbsingcomm = min(deflsing.shape[2], gmod.deflsing.shape[2])
deflsingresi = deflsing[0, ..., :numbsingcomm] - gmod.deflsing[..., :numbsingcomm]
deflsingresimgtd = np.sqrt(np.sum(deflsingresi**2, axis=1))
deflsingresiperc = 100. * deflsingresimgtd / gmod.deflsingmgtd[..., :numbsingcomm]
setattr(gmodstat, 'numbsingcomm', numbsingcomm)
setattr(gmodstat, 'deflsingresi', deflsingresi)
truedeflmgtd = getattr(gdat.true.this, 'deflmgtd')
truedefl = getattr(gdat.true.this, 'defl')
deflresi = defl - truedefl
deflresimgtd = np.sqrt(np.sum(deflresi**2, axis=1))
deflresiperc = 100. * deflresimgtd / truedeflmgtd
setattr(gmodstat, 'deflresi', deflresi)
setattr(gmodstat, 'deflresimgtd', deflresimgtd)
if gmod.numbparaelem > 0:
trueconvelem = getattr(gdat.true.this, 'convelem')
convelemresi = convelem[:] - trueconvelem
convelemresiperc = 100. * convelemresi / trueconvelem
setattr(gmodstat, 'convelemresi', convelemresi)
setattr(gmodstat, 'convelemresiperc', convelemresiperc)
truemagn = getattr(gdat.true.this, 'magn')
magnresi = magn[:] - truemagn
magnresiperc = 100. * magnresi / truemagn
setattr(gmodstat, 'magnresi', magnresi)
setattr(gmodstat, 'magnresiperc', magnresiperc)
if gmod.numbparaelem > 0:
# correlate the catalog sample with the reference catalog
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] > 0:
cmpl = np.array([float(len(indxelemrefrasschits[q][l])) / gdat.refr.numbelem[q]])
if gdat.booldiagmode:
if cmpl > 1. or cmpl < 0.:
raise Exception('')
else:
cmpl = np.array([-1.])
setattr(gmodstat, 'cmplpop%dpop%d' % (l, q), cmpl)
if gmodstat.numbelem[l] > 0:
fdis = np.array([float(indxelemfittasscfals[q][l].size) / gmodstat.numbelem[l]])
if gdat.booldiagmode:
if fdis > 1. or fdis < 0.:
raise Exception('')
else:
fdis = np.array([-1.])
setattr(gmodstat, 'fdispop%dpop%d' % (q, l), fdis)
# collect the associated fitting element parameter for each reference element
featrefrassc = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
featrefrassc[q][l] = dict()
for strgfeat in gdat.refr.namepara.elem[q]:
if not strgfeat in gmod.namepara.genrelem[l] or strgfeat in gdat.refr.namepara.elemonly[q][l]:
continue
if isinstance(gmodstat.dictelem[l][strgfeat], np.ndarray) and gmodstat.dictelem[l][strgfeat].ndim > 1:
continue
featrefrassc[q][l][strgfeat] = np.zeros(gdat.refr.numbelem[q]) + np.nan
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][strgfeat]) > 0:
featrefrassc[q][l][strgfeat][indxelemrefrasschits[q][l]] = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
name = strgfeat + 'asscpop%dpop%d' % (q, l)
setattr(gmodstat, name, featrefrassc[q][l][strgfeat])
# completeness
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
l = gdat.refr.indxpoplfittassc[q]
for nameparaelemfrst in gdat.refr.namepara.elem[q]:
if nameparaelemfrst.startswith('etag'):
continue
if nameparaelemfrst == 'spec' or nameparaelemfrst == 'specplot':
continue
refrfeatfrst = gdat.refr.dictelem[q][nameparaelemfrst][0, :]
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gdat.refr.namepara.elem[q]:
if nameparaelemfrst == nameparaelemseco:
continue
if nameparaelemseco.startswith('etag'):
continue
if nameparaelemseco == 'spec' or nameparaelemseco == 'specplot':
continue
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the cmpl np.array should depend on strgmodl
cmpltdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot)) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeattdim = getattr(gdat.refr, 'hist%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q))
refrfeatseco = gdat.refr.dictelem[q][nameparaelemseco][0, :]
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
refrhistfeattdimassc = np.histogram2d(refrfeatfrst[indxelemrefrasschits[q][l]], \
refrfeatseco[indxelemrefrasschits[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(refrhistfeattdim != 0.)
if indxgood[0].size > 0:
cmpltdim[indxgood] = refrhistfeattdimassc[indxgood].astype(float) / refrhistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((cmpltdim[indxgood] > 1.) | (cmpltdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q), cmpltdim)
cmplfrst = np.zeros(gdat.numbbinsplot) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeatfrst = getattr(gdat.refr, 'hist' + nameparaelemfrst + 'pop%d' % q)
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
refrhistfeatfrstassc = np.histogram(refrfeatfrst[indxelemrefrasschits[q][l]], bins=binsfeatfrst)[0]
indxgood = np.where(refrhistfeatfrst != 0.)[0]
if indxgood.size > 0:
cmplfrst[indxgood] = refrhistfeatfrstassc[indxgood].astype(float) / refrhistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((cmplfrst[indxgood] > 1.) | (cmplfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%spop%d' % (nameparaelemfrst, q), cmplfrst)
# false discovery rate
for l in gmod.indxpopl:
q = gmod.indxpoplrefrassc[l]
for nameparaelemfrst in gmod.namepara.elem[l]:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gmod.namepara.elem[l]:
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the fdis np.array should depend on strgmodl
fdistdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot))
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemseco]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
strgfeattdim = nameparaelemfrst + nameparaelemseco + 'pop%d' % l
fitthistfeattdim = getattr(gmodstat, 'hist' + strgfeattdim)
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
fitthistfeattdimfals = np.histogram2d(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], \
gmodstat.dictelem[l][nameparaelemseco][indxelemfittasscfals[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(fitthistfeattdim != 0.)
if indxgood[0].size > 0:
fdistdim[indxgood] = fitthistfeattdimfals[indxgood].astype(float) / fitthistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((fdistdim[indxgood] > 1.) | (fdistdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%s%spop%d' % (nameparaelemfrst, nameparaelemseco, l), fdistdim)
fdisfrst = np.zeros(gdat.numbbinsplot)
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
fitthistfeatfrstfals = np.histogram(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], bins=binsfeatfrst)[0]
fitthistfeatfrst = getattr(gmodstat, 'hist' + nameparaelemfrst + 'pop%d' % l)
indxgood = np.where(fitthistfeatfrst != 0.)[0]
if indxgood.size > 0:
fdisfrst[indxgood] = fitthistfeatfrstfals[indxgood].astype(float) / fitthistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((fdisfrst[indxgood] > 1.) | (fdisfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%spop%d' % (nameparaelemfrst, l), fdisfrst)
# temp
if strgmodl == 'true' and gdat.typeverb > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
if np.where(minm > gmodstat.dictelem[l][strgfeat])[0].size > 0 or np.where(maxm < gmodstat.dictelem[l][strgfeat])[0].size > 0:
print('Warning: element parameter outside the plot limits.')
print('l')
print(l)
print('Feature: ')
print(strgfeat)
print('Plot minmimum')
print(minm)
print('Plot maxmimum')
print(maxm)
if strgfeat == gmod.nameparagenrelemampl[l] and strgfeat in gmod.namepara.genrelem[l]:
gmod.indxparagenrelemtemp = gmod.namepara.genrelem[l].index(strgfeat)
if (gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp] != 'gaus' and not gmod.listscalparagenrelem[l][gmod.indxparagenrelemtemp].startswith('lnor')):
raise Exception('')
stopchro(gdat, gdatmodi, 'tert')
def retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, paragenrscalfull, dictelem, numbelem):
    '''
    Return the log-prior contribution of the element feature strgfeat of
    population l under the prior distribution named by strgpdfn.

    gdat: global data object; the model object is read as gdat.<strgmodl>
    strgmodl: model tag (e.g. 'fitt' or 'true')
    l: population index
    g: feature index (unused in this function)
    strgfeat: name of the element feature
    strgpdfn: name of the prior PDF ('self', 'logt', 'gaus', 'dexp', 'expo',
              'tmpl', 'powr', 'dpowslopbrek', 'dsrcexpo')
    paragenrscalfull: full vector of generative parameters in physical scale
    dictelem: per-population dictionary of element feature arrays
    numbelem: per-population element counts
    '''

    gmod = getattr(gdat, strgmodl)

    if strgpdfn == 'self':
        # uniform prior: each element contributes log(1 / (maxm - minm))
        minmfeat = getattr(gmod.minmpara, strgfeat)
        maxmfeat = getattr(gmod.maxmpara, strgfeat)
        lpri = numbelem[l] * np.log(1. / (maxmfeat - minmfeat))
    if strgpdfn == 'logt':
        lpri = retr_lprilogtdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
    if strgpdfn == 'gaus':
        lpri = retr_lprigausdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
    if strgpdfn == 'dexp':
        maxmbgal = getattr(gmod, 'maxmbgal')
        gmod.indxpara.bgaldistscal = getattr(gmod.indxpara, 'bgaldistscalpop%d' % l)
        # fix: 'pdfn_dnp.exp' was a corruption of pdfn_dexp left by an automated
        # 'exp' -> 'np.exp' rename; pdfn_dnp does not exist.
        lpri = np.sum(np.log(pdfn_dexp(dictelem[l]['bgal'], maxmbgal, paragenrscalfull[gmod.indxpara.bgaldistscal])))
    if strgpdfn == 'expo':
        maxmgang = getattr(gmod, 'maxmgang')
        gang = retr_gang(dictelem[l]['lgal'], dictelem[l]['bgal'])
        gmod.indxpara.gangdistscal = getattr(gmod.indxpara, 'gangdistscalpop%d' % l)
        lpri = np.sum(np.log(pdfn_expo(gang, maxmgang, paragenrscalfull[gmod.indxpara.gangdistscal])))
        # fix: bare 'pi' was a NameError; use np.pi.
        # NOTE(review): this assignment discards the radial term computed just
        # above, leaving only the uniform-azimuth normalization -- confirm
        # whether this was meant to be '+='.
        lpri = -numbelem[l] * np.log(2. * np.pi)
    if strgpdfn == 'tmpl':
        # fix: resolve the spatial prior interpolant BEFORE evaluating it; the
        # original read lpdfspatprioobjt first and only bound it in a later
        # block, which raised UnboundLocalError for 'tmpl'.
        if strgpdfn.endswith('cons'):
            # NOTE(review): unreachable when strgpdfn == 'tmpl'; kept verbatim
            # for reference (it also references an undefined gmodstat).
            pdfnspatpriotemp = getattr(gmod, 'pdfnspatpriotemp')
            spatdistcons = paragenrscalfull[getattr(gmod.indxpara, 'spatdistcons')]
            lpdfspatprio, lpdfspatprioobjt = retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons)
            lpdfspatpriointp = lpdfspatprioobjt(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart)
            lpdfspatpriointp = lpdfspatpriointp.T
            setattr(gmodstat, 'lpdfspatpriointp', lpdfspatpriointp)
            setattr(gmodstat, 'lpdfspatprioobjt', lpdfspatprioobjt)
        else:
            lpdfspatprioobjt = gmod.lpdfspatprioobjt
        lpri = np.sum(lpdfspatprioobjt(dictelem[l]['bgal'], dictelem[l]['lgal'], grid=False))
    if strgpdfn == 'powr':
        lpri = retr_lpripowrdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
    if strgpdfn == 'dpowslopbrek':
        lpri = retr_lpridpowdist(gdat, strgmodl, dictelem[l][strgfeat], strgfeat, paragenrscalfull, l)
    if strgpdfn == 'dsrcexpo':
        # fix: was 'lpri +=', but the strgpdfn branches are mutually exclusive,
        # so lpri was always unbound here; assign instead.
        # NOTE(review): lgalsour/bgalsour are not defined in this scope --
        # presumably module/enclosing-scope names; verify against callers.
        lpri = -np.sum(np.sqrt((dictelem[l]['lgal'] - lgalsour)**2 + (dictelem[l]['bgal'] - bgalsour)**2) / \
                                                                            getattr(gmod, 'dsrcdistsexppop%d' % l))

    return lpri
def checstrgfeat(strgfrst, strgseco):
    '''
    Return True when the ordered feature-name pair (strgfrst, strgseco) should
    be processed, i.e., when strgfrst strictly precedes strgseco.

    The original spelled this out as a comparison of the two names over their
    common-length prefixes with a tie-break on length; that rule is exactly
    Python's lexicographic string ordering, in which a string that is a proper
    prefix of another sorts first and equal strings do not precede each other.
    '''
    return strgfrst < strgseco
def retr_pathoutprtag(pathpcat, rtag):
    """Return the output directory path (with trailing slash) for a run tag."""
    return '%s/data/outp/%s/' % (pathpcat, rtag)
def proc_finl(gdat=None, rtag=None, strgpdfn='post', listnamevarbproc=None, forcplot=False):
    """Perform final-processing on a finished run (or a list of tiled runs).

    Aggregates the per-process chain outputs (gdatmodi objects) into a single
    gdatfinl object, computes convergence diagnostics (Gelman-Rubin,
    autocorrelation), credible intervals and posterior corrections, writes
    gdatfinl to disk, and optionally produces the final plots.

    Parameters
    ----------
    gdat : global-data object, optional
        Only consulted for ``gdat.rtag`` when ``rtag`` is None, and passed to
        ``retr_axis``.
    rtag : str or list of str, optional
        Run tag(s); a list triggers tiled final-processing.
    strgpdfn : str
        Which density the chains sample: 'post' or 'prio'.
    listnamevarbproc : list, optional
        NOTE(review): forwarded to the recursive 'prio' call but otherwise
        unused in the visible body.
    forcplot : bool
        Force plot generation even when the ``makeplotfinl*`` switch is off.
    """

    # mock-run global object, forwarded to plot_finl() at the end (if found)
    gdatmock = None

    print('proc_finl()')

    if rtag is None:
        rtag = gdat.rtag

    # determine if the final-processing is nominal or tiling
    if isinstance(rtag, list):
        listrtagmodi = rtag
        rtagfinl = tdpy.retr_strgtimestmp() + rtag[0][15:] + 'tile'
        booltile = True
    else:
        listrtagmodi = [rtag]
        rtagfinl = rtag
        booltile = False

    # determine if the gdatfinl object is already available on disk
    boolgdatfinl = chec_statfile(pathpcat, rtagfinl, 'gdatfinlpost')
    boolgdatfinlgood = False
    if boolgdatfinl:
        print('Final-processing has been performed previously.')
        pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
        path = pathoutprtag + 'gdatfinl' + strgpdfn
        # NOTE(review): bare except deliberately treats any read failure as a
        # corrupted file; processing then falls through to the else branch
        try:
            gdat = readfile(path)
            boolgdatfinlgood = True
        except:
            print('gdatfinl object is corrupted.')

    if boolgdatfinl and boolgdatfinlgood:
        # read gdatfinl back from disk and reload the mock-run output, if any
        pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
        path = pathoutprtag + 'gdatfinl' + strgpdfn
        gdatfinl = readfile(path)

        if gdatfinl.fitt.numbparaelem > 0:
            if gdatfinl.typedata == 'inpt':
                if gdatfinl.boolcrex or gdatfinl.boolcrin:
                    if gdatfinl.rtagmock is not None:
                        path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'
                        gdatmock = readfile(path)

    else:
        # aggregate the run(s) from scratch
        if booltile:
            gdatfinltile = tdpy.gdatstrt()

        indxrtaggood = []
        liststrgtile = []
        listrtaggood = []
        indxtiletemp = 0
        for n, rtagmodi in enumerate(listrtagmodi):

            # read gdatinit; in tiling mode a missing tile is skipped,
            # otherwise it aborts the whole final-processing
            boolgdatinit = chec_statfile(pathpcat, rtagmodi, 'gdatinit')
            if not boolgdatinit:
                if booltile:
                    print('Initial global object not found. Skipping...')
                    continue
                else:
                    print('Initial global object not found. Quitting...')
                    return

            pathoutprtag = retr_pathoutprtag(pathpcat, rtagmodi)
            path = pathoutprtag + 'gdatinit'

            gdatinit = readfile(path)
            if booltile:
                gdatfinltile = gdatinit
                gdatfinl = gdatinit
            else:
                gdatfinl = gdatinit

            # read the per-process chain objects
            pathoutprtagmodi = retr_pathoutprtag(pathpcat, rtagmodi)
            listgdatmodi = []
            for k in gdatinit.indxproc:
                path = pathoutprtagmodi + 'gdatmodi%04d' % k + strgpdfn
                listgdatmodi.append(readfile(path))

            # erase stale fitting-model index attributes
            gdatdictcopy = deepcopy(gdatinit.__dict__)
            for strg, valu in gdatdictcopy.items():
                if strg.startswith('fitt.indxpara.'):
                    delattr(gdatinit, strg)

            if gdatinit.boolmockonly:
                print('Mock only run. Quitting final-processing...')
                return

            # read gdatmodi
            print('rtagmodi')
            print(rtagmodi)
            boolgdatmodi = chec_statfile(pathpcat, rtagmodi, 'gdatmodipost')
            if not boolgdatmodi:
                print('Modified global object not found. Quitting final-processing...')
                return

            ## list of other parameters to be flattened
            gdatinit.liststrgvarbarryflat = deepcopy(listgdatmodi[0].liststrgvarbarry)
            # temp
            #for strg in ['memoresi']:
            #    gdatinit.liststrgvarbarryflat.remove(strg)

            listparagenrscalfull = np.empty((gdatinit.numbsamptotl, gdatinit.fitt.maxmnumbpara))

            if booltile:
                gdatfinltile.pathoutprtag = retr_pathoutprtag(pathpcat, rtagfinl)
                numbsamptotlrsmp = gdatinit.numbsamptotl
                indxsamptotlrsmp = np.random.choice(gdatinit.indxsamptotl, size=gdatinit.numbsamptotl, replace=False)

            # aggregate samples from the chains
            if gdatinit.typeverb > 0:
                print('Reading gdatmodi objects from all processes...')
                timeinit = gdatinit.functime()

            if gdatinit.typeverb > 0:
                timefinl = gdatinit.functime()
                print('Done in %.3g seconds.' % (timefinl - timeinit))

            if gdatinit.fitt.numbparaelem > 0:
                if len(getattr(listgdatmodi[0], 'list' + strgpdfn + 'gmodstat.indxelemfull')) == 0:
                    print('Found an empty element list. Skipping...')
                    continue

            if gdatinit.typeverb > 0:
                print('Accumulating np.arrays...')
                timeinit = gdatinit.functime()

            # stack each per-process array variable into shape
            # (numbsamp, numbproc, ...) on gdatfinl
            for strgvarb in gdatinit.liststrgvarbarryflat:
                for k in gdatinit.indxproc:
                    if k == 0:
                        shap = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb).shape
                        shap = [shap[0], gdatinit.numbproc] + list(shap[1:])
                        temp = np.zeros(shap) - 1
                    if len(shap) > 2:
                        temp[:, k, :] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)
                    else:
                        temp[:, k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)
                setattr(gdatfinl, 'list' + strgpdfn + strgvarb, temp)

            if gdatfinl.typeverb > 0:
                timefinl = gdatfinl.functime()
                print('Done in %.3g seconds.' % (timefinl - timeinit))

            if gdatfinl.typeverb > 0:
                print('Accumulating lists...')
                timeinit = gdatfinl.functime()

            # lists of lists collected at each sample
            for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
                listtemp = [[[] for k in gdatfinl.indxproc] for j in gdatfinl.indxsamp]
                for j in gdatfinl.indxsamp:
                    for k in gdatfinl.indxproc:
                        listtemp[j][k] = getattr(listgdatmodi[k], 'list' + strgpdfn + strgvarb)[j]
                setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)

            if gdatfinl.typeverb > 0:
                timefinl = gdatfinl.functime()
                print('Done in %.3g seconds.' % (timefinl - timeinit))

            if not booltile:
                ## np.maximum likelihood sample
                gdatfinl.maxmllikproc = np.empty(gdatfinl.numbproc)
                gdatfinl.indxswepmaxmllikproc = np.empty(gdatfinl.numbproc, dtype=int)
                gdatfinl.sampmaxmllikproc = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))
                for k in gdatfinl.indxproc:
                    gdatfinl.maxmllikproc[k] = listgdatmodi[k].maxmllikswep
                    gdatfinl.indxswepmaxmllikproc[k] = listgdatmodi[k].indxswepmaxmllik
                    gdatfinl.sampmaxmllikproc[k, :] = listgdatmodi[k].sampmaxmllik

                listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')
                listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')

                # Gelman-Rubin test (only meaningful with more than one chain)
                if gdatfinl.numbproc > 1:
                    if gdatfinl.typeverb > 0:
                        print('Computing the Gelman-Rubin TS...')
                        timeinit = gdatfinl.functime()
                    gdatfinl.gmrbparagenrscalbase = np.zeros(gdatfinl.fitt.numbparagenrbase)
                    gdatfinl.gmrbstat = np.zeros((gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))
                    for k in gdatfinl.fitt.indxparagenrbase:
                        gdatfinl.gmrbparagenrscalbase[k] = tdpy.mcmc.gmrb_test(listparagenrscalfull[:, :, k])
                        # non-finite test statistics are zeroed out
                        if not np.isfinite(gdatfinl.gmrbparagenrscalbase[k]):
                            gdatfinl.gmrbparagenrscalbase[k] = 0.
                    listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')
                    for i in gdatfinl.indxener:
                        for j in gdatfinl.indxpixl:
                            for m in gdatfinl.indxevtt:
                                gdatfinl.gmrbstat[i, j, m] = tdpy.mcmc.gmrb_test(listcntpmodl[:, :, i, j, m])
                    if gdatfinl.typeverb > 0:
                        timefinl = gdatfinl.functime()
                        print('Done in %.3g seconds.' % (timefinl - timeinit))

                # calculate the autocorrelation of the chains
                if gdatfinl.typeverb > 0:
                    print('Computing the autocorrelation of the chains...')
                    timeinit = gdatfinl.functime()
                gdatfinl.atcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt, int(gdatfinl.numbparagenrfull / 2)))
                gdatfinl.timeatcrcntp = np.empty((gdatfinl.numbproc, gdatfinl.numbener, gdatfinl.numbpixl, gdatfinl.numbevtt))
                gdatfinl.atcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara, int(gdatfinl.numbparagenrfull / 2)))
                gdatfinl.timeatcrpara = np.empty((gdatfinl.numbproc, gdatfinl.fitt.maxmnumbpara))
                for k in gdatfinl.indxproc:
                    gdatfinl.atcrpara[k, :, :], gdatfinl.timeatcrpara[k, :] = tdpy.mcmc.retr_timeatcr(listparagenrscalfull[:, k, :], typeverb=gdatfinl.typeverb)
                    listcntpmodl = getattr(gdatfinl, 'list' + strgpdfn + 'cntpmodl')
                    gdatfinl.atcrcntp[k, :], gdatfinl.timeatcrcntp[k, :] = tdpy.mcmc.retr_timeatcr(listcntpmodl[:, k, :, :, :], typeverb=gdatfinl.typeverb)

                timeatcrcntpmaxm = np.amax(gdatfinl.timeatcrcntp)
                gdatfinl.timeatcrcntpmaxm = np.amax(timeatcrcntpmaxm)

                if gdatfinl.typeverb > 0:
                    timefinl = gdatfinl.functime()
                    print('Done in %.3g seconds.' % (timefinl - timeinit))

                setattr(gdatfinl, 'list' + strgpdfn + 'sampproc', np.copy(getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')))

            # flatten the list chains from different walkers
            for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
                listtemp = []
                listinpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
                for j in gdatfinl.indxsamp:
                    for k in gdatfinl.indxproc:
                        listtemp.append(listinpt[j][k])
                setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listtemp)

            # flatten the np.array chains from different walkers
            for strgvarb in gdatinit.liststrgvarbarryflat:
                inpt = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
                shap = [inpt.shape[0] * inpt.shape[1]] + list(inpt.shape[2:])
                setattr(gdatfinl, 'list' + strgpdfn + strgvarb, inpt.reshape(shap))

            listparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')
            listparagenrunitfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrunitfull')

            if booltile:
                # bookkeeping for this tile
                liststrgtile.append(rtagmodi.split('_')[-2][-4:])
                listrtaggood.append(rtagmodi)
                indxrtaggood.append(n)
                indxtiletemp += 1

                # first tile: initialize the tile-wide accumulators
                if len(liststrgtile) == 1:
                    for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
                        refrfeattile = [[] for q in gdatfinl.indxrefr]
                        setattr(gdatfinl, 'refr' + strgfeat, refrfeattile)

                    for strgvarb in gdatfinl.liststrgvarbarrysamp:
                        if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
                            listvarb = []
                            setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarb)
                        else:
                            hist = np.zeros_like(getattr(listgdatmodi[0], 'list' + strgpdfn + strgvarb))
                            setattr(gdatfinl, 'list' + strgpdfn + strgvarb, hist)

                    for name, valu in gdatfinl.__dict__.items():
                        if name.startswith('refrhist'):
                            setattr(gdatfinl, name, np.zeros_like(getattr(gdatfinl, name)))

                #for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
                #    refrfeattile = getattr(gdatfinl, 'refr' + strgfeat)
                #    #refrfeat = getattr(gdatfinl, 'refr' + strgfeat)
                #    refrfeat = [[] for q in gdatfinl.indxrefr]
                #    for q in gdatfinl.indxrefr:
                #        if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:
                #            refrfeat[q].append(refrfeattile[q])

                # NOTE(review): these accumulations add a variable to itself
                # (hist += same attribute), which doubles rather than
                # accumulates across tiles -- looks suspicious; confirm intent
                for strgvarb in gdatfinl.liststrgvarbarrysamp:
                    if strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
                        # temp
                        if 'spec' in strgvarb:
                            continue
                        hist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
                        hist += getattr(gdatfinl, 'list' + strgpdfn + strgvarb)

                for name, valu in gdatfinl.__dict__.items():
                    if name.startswith('refrhist'):
                        hist = getattr(gdatfinl, name)
                        hist += getattr(gdatfinl, name)

                print('Done with the tile number %d, run number %d...' % (indxtiletemp, n))

        if booltile:
            # concatenate the per-tile chains into tile-wide arrays
            gdatfinl.pathplotrtag = gdatfinl.pathimag + rtagfinl + '/'
            make_fold(gdatfinl)
            indxrtaggood = np.array(indxrtaggood).astype(int)
            numbrtaggood = indxrtaggood.size
            numbtile = numbrtaggood
            print('Found %d tiles with run tags:' % numbrtaggood)
            for indxrtaggoodtemp in indxrtaggood:
                print(rtag[indxrtaggoodtemp])

            # np.concatenate reference elements from different tiles
            #for strgfeat in gdatfinl.refrgmod.namepara.genrelemtotl:
            #    refrfeat = getattr(gdatfinl, 'refr' + strgfeat, refrfeat)
            #    for q in gdatfinl.indxrefr:
            #        if strgfeat in gdatfinl.refrgmod.namepara.genrelem[q]:
            #            refrfeat[q] = np.concatenate(refrfeat[q], axis=1)

            for strgvarb in gdatfinl.liststrgvarbarrysamp:
                if not strgvarb in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
                    listvarb = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)
                    if 'assc' in strgvarb:
                        # association variables: concatenate along the element axis
                        numbrefrelemtotl = 0
                        for k, varbrsmp in enumerate(listvarb):
                            numbrefrelemtotl += varbrsmp.shape[1]
                        shap = [gdatfinl.numbsamptotl, numbrefrelemtotl]
                        listvarbtemp = np.empty(shap)
                        cntr = 0
                        for k, varb in enumerate(listvarb):
                            listvarbtemp[:, cntr:cntr+varb.shape[1]] = varb
                            cntr += varb.shape[1]
                    else:
                        # otherwise concatenate along the sample axis
                        shap = [gdatfinl.numbsamptotl * numbtile] + list(listvarb[0].shape[1:])
                        listvarbtemp = np.empty(shap)
                        for k, varb in enumerate(listvarb):
                            listvarbtemp[k*gdatfinl.numbsamptotl:(k+1)*gdatfinl.numbsamptotl, ...] = varb
                    setattr(gdatfinl, 'list' + strgpdfn + strgvarb, listvarbtemp)
        else:
            # np.maximum likelihood sample
            if gdatfinl.fitt.numbparaelem > 0:
                listindxelemfull = getattr(gdatfinl, 'list' + strgpdfn + 'indxelemfull')
            listllik = getattr(gdatfinl, 'list' + strgpdfn + 'llik')
            listlliktotl = getattr(gdatfinl, 'list' + strgpdfn + 'lliktotl')
            indxsamptotlmlik = np.argmax(np.sum(np.sum(np.sum(listllik, 3), 2), 1))

            # copy the np.maximum likelihood sample
            for strgvarb in listgdatmodi[0].liststrgvarbarrysamp:
                setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik, ...])
            for strgvarb in listgdatmodi[0].liststrgvarblistsamp:
                setattr(gdatfinl, 'mlik' + strgvarb, getattr(gdatfinl, 'list' + strgpdfn + strgvarb)[indxsamptotlmlik])

            # temp -- dont gdatfinl.listllik and gdatfinl.listparagenrscalfull have the same dimensions?
            # NOTE(review): the next line is duplicated in the original source
            gdatfinl.mlikparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')[indxsamptotlmlik, :]
            gdatfinl.mlikparagenrscalfull = getattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalfull')[indxsamptotlmlik, :]
            #if gdatfinl.fitt.numbparaelem > 0:
            #    gdatfinl.mlikindxelemfull = listindxelemfull[indxsamptotlmlik]
            gdatfinl.mlikparagenrscalbase = gdatfinl.mlikparagenrscalfull[gdatfinl.fitt.indxparagenrbase]
            # NOTE(review): `gmod` is not defined in this scope -- the loop
            # target writes an attribute on a presumed global; confirm
            for k, gmod.nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):
                setattr(gdatfinl, 'mlik' + gmod.nameparagenrbase, gdatfinl.mlikparagenrscalbase[k])

            # add execution times to the chain output
            gdatfinl.timereal = np.zeros(gdatfinl.numbproc)
            gdatfinl.timeproc = np.zeros(gdatfinl.numbproc)
            for k in gdatfinl.indxproc:
                gdatfinl.timereal[k] = listgdatmodi[k].timereal
                gdatfinl.timeproc[k] = listgdatmodi[k].timeproc

            # find the np.maximum likelihood and posterior over the chains
            gdatfinl.indxprocmaxmllik = np.argmax(gdatfinl.maxmllikproc)
            #gdatfinl.maxmlliktotl = gdatfinl.maxmllikproc[gdatfinl.indxprocmaxmllik]
            gdatfinl.indxswepmaxmllik = gdatfinl.indxprocmaxmllik * gdatfinl.numbparagenrfull + gdatfinl.indxswepmaxmllikproc[gdatfinl.indxprocmaxmllik]
            gdatfinl.sampmaxmllik = gdatfinl.sampmaxmllikproc[gdatfinl.indxprocmaxmllik, :]

            # evidence estimates
            if strgpdfn == 'post':
                levipost = retr_levipost(listlliktotl)
                setattr(gdatfinl, strgpdfn + 'levipost', levipost)

            if strgpdfn == 'prio':
                leviprio = np.log(np.mean(np.exp(listlliktotl)))
                setattr(gdatfinl, strgpdfn + 'leviprio', leviprio)

        # parse the sample vector
        listparagenrscalbase = listparagenrscalfull[:, gdatfinl.fitt.indxparagenrbase]
        for k, gmod.nameparagenrbase in enumerate(gdatfinl.fitt.nameparagenrbase):
            setattr(gdatfinl, 'list' + strgpdfn + gmod.nameparagenrbase, listparagenrscalbase[:, k])
        setattr(gdatfinl, 'list' + strgpdfn + 'paragenrscalbase', listparagenrscalbase)

        # load (or recursively produce) the prior-sampling counterpart
        if strgpdfn == 'post' and gdatfinl.checprio:
            pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
            path = pathoutprtag + 'gdatfinlprio'
            try:
                gdatprio = readfile(path)
            except:
                proc_finl(gdat=gdatfinl, strgpdfn='prio', listnamevarbproc=listnamevarbproc, forcplot=forcplot)
        else:
            gdatprio = None

        # post process samples
        ## bin element parameters
        if gdatfinl.typeverb > 0:
            print('Binning the probabilistic catalog spatially...')
            timeinit = gdatfinl.functime()

        if not booltile:
            if gdatfinl.fitt.numbparaelem > 0:
                if gdatfinl.boolbinsspat:
                    histlgalbgalelemstkd = [[] for l in gdatfinl.fittindxpopl]

                    listlgal = getattr(gdatfinl, 'list' + strgpdfn + 'lgal')
                    listbgal = getattr(gdatfinl, 'list' + strgpdfn + 'bgal')
                    for l in gdatfinl.fittindxpopl:
                        if gdatfinl.fitttypeelem[l] != 'lghtline':
                            # NOTE(review): `numb`, `strgfeat` and `k` are not
                            # defined in this scope -- they appear to leak from
                            # earlier loops in the original module; confirm
                            histlgalbgalelemstkd[l] = np.zeros((gdatfinl.numbbgalpntsprob, gdatfinl.numblgalpntsprob, gdatfinl.numbbinsplot, numb))
                            temparry = np.concatenate([listlgal[n][l] for n in gdatfinl.indxsamptotl])
                            temp = np.empty((len(temparry), 3))
                            temp[:, 0] = temparry
                            temp[:, 1] = np.concatenate([listbgal[n][l] for n in gdatfinl.indxsamptotl])
                            temp[:, 2] = np.concatenate([getattr(gdatfinl, 'list' + strgpdfn + strgfeat)[n][l] for n in gdatfinl.indxsamptotl])
                            bins = getattr(gdatfinl, 'bins' + strgfeat)
                            histlgalbgalelemstkd[l][:, :, :, k] = np.histogramdd(temp, \
                                                        bins=(gdatfinl.binslgalpntsprob, gdatfinl.binsbgalpntsprob, bins))[0]
                    setattr(gdatfinl, strgpdfn + 'histlgalbgalelemstkd', histlgalbgalelemstkd)

            if gdatfinl.typeverb > 0:
                timefinl = gdatfinl.functime()
                print('Done in %.3g seconds.' % (timefinl - timeinit))

            ## construct a condensed catalog of elements
            if gdatfinl.boolcondcatl and gdatfinl.fitt.numbparaelem > 0:
                if gdatfinl.typeverb > 0:
                    print('Constructing a condensed catalog...')
                    timeinit = gdatfinl.functime()
                retr_condcatl(gdatfinl)
                if gdatfinl.typeverb > 0:
                    timefinl = gdatfinl.functime()
                    print('Done in %.3g seconds.' % (timefinl - timeinit))

            # construct lists of samples for each proposal type
            listindxproptype = getattr(gdatfinl, 'list' + strgpdfn + 'indxproptype')
            listboolpropaccp = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropaccp')
            listboolpropfilt = getattr(gdatfinl, 'list' + strgpdfn + 'boolpropfilt')
            listindxsamptotlproptotl = []
            listindxsamptotlpropfilt = []
            listindxsamptotlpropaccp = []
            listindxsamptotlpropreje = []
            for n in gdatfinl.indxproptype:
                indxsampproptype = np.where(listindxproptype == gdatfinl.indxproptype[n])[0]
                listindxsamptotlproptotl.append(indxsampproptype)
                listindxsamptotlpropaccp.append(np.intersect1d(indxsampproptype, np.where(listboolpropaccp)[0]))
                listindxsamptotlpropfilt.append(np.intersect1d(indxsampproptype, np.where(listboolpropfilt)[0]))
                listindxsamptotlpropreje.append(np.intersect1d(indxsampproptype, np.where(np.logical_not(listboolpropaccp))[0]))
                # acceptance rate for this proposal type (0 when never proposed)
                if listindxsamptotlproptotl[n].size == 0:
                    accp = 0.
                else:
                    accp = float(listindxsamptotlpropaccp[n].size) / listindxsamptotlproptotl[n].size
                setattr(gdatfinl, 'accp' + gdatfinl.nameproptype[n], accp)

            setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlproptotl', listindxsamptotlproptotl)
            setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropaccp', listindxsamptotlpropaccp)
            setattr(gdatfinl, 'list' + strgpdfn + 'indxsamptotlpropreje', listindxsamptotlpropreje)

        if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':
            if gdatfinl.typedata == 'inpt':
                if gdatfinl.boolcrex or gdatfinl.boolcrin:
                    if gdatfinl.rtagmock is not None:
                        path = gdatfinl.pathoutprtagmock + 'gdatfinlpost'
                        gdatmock = readfile(path)

        # posterior corrections
        if gdatfinl.fitt.numbparaelem > 0 and strgpdfn == 'post':

            ## perform corrections
            if gdatfinl.typedata == 'inpt':
                if gdatfinl.boolcrex or gdatfinl.boolcrin:

                    for gmod.namepara.genrelemvarbhist in gdatfinl.liststrgvarbhist:
                        strgvarb = gmod.namepara.genrelemvarbhist[0]

                        # skip histograms of association errors and spectra
                        if gmod.namepara.genrelemvarbhist[1].startswith('aerr') or len(gmod.namepara.genrelemvarbhist[2]) > 0 and gmod.namepara.genrelemvarbhist[2].startswith('aerr'):
                            continue
                        if gmod.namepara.genrelemvarbhist[1] == 'spec' or gmod.namepara.genrelemvarbhist[1] == 'deflprof' or gmod.namepara.genrelemvarbhist[1] == 'specplot':
                            continue
                        if len(gmod.namepara.genrelemvarbhist[2]) > 0 and (gmod.namepara.genrelemvarbhist[2] == 'spec' or \
                                    gmod.namepara.genrelemvarbhist[2] == 'deflprof' or gmod.namepara.genrelemvarbhist[2] == 'specplot'):
                            continue

                        ## internal correction
                        listhist = getattr(gdatfinl, 'list' + strgpdfn + strgvarb)

                        for qq in gdatmock.indxrefr:
                            l = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[1][0])
                            qq = int(gmod.namepara.genrelemvarbhist[3][qq].split('pop')[2][0])
                            if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and \
                                    (len(gmod.namepara.genrelemvarbhist[2]) == 0 or gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr):
                                listhistincr = listhist
                            else:
                                if gmod.namepara.genrelemvarbhist[1][-4:] in gdatfinl.listnamerefr and len(gmod.namepara.genrelemvarbhist[2]) > 0:
                                    listcmpltrue = np.stack(gdatfinl.numbbinsplot * \
                                                    [getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (l, qq))], 2)
                                    listfdistrue = np.stack(gdatfinl.numbbinsplot * \
                                                    [getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[2] + 'pop%dpop%d' % (qq, l))], 2)
                                elif len(gmod.namepara.genrelemvarbhist[2][:-4]) > 0 and gmod.namepara.genrelemvarbhist[2][-4:] in gdatfinl.listnamerefr:
                                    listcmpltrue = np.stack(gdatfinl.numbbinsplot * \
                                                    [getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (l, qq))], 1)
                                    listfdistrue = np.stack(gdatfinl.numbbinsplot * \
                                                    [getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[1] + 'pop%dpop%d' % (qq, l))], 1)
                                else:
                                    listcmpltrue = getattr(gdatmock, 'listpostcmpl' + gmod.namepara.genrelemvarbhist[3][qq])
                                    listfdistrue = getattr(gdatmock, 'listpostfdis' + gmod.namepara.genrelemvarbhist[3][qq])
                                # bootstrap-resample completeness/false discovery/histogram
                                if len(gmod.namepara.genrelemvarbhist[2]) == 0:
                                    listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
                                    listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
                                    listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot))
                                    for k in gdatfinl.indxbinsplot:
                                        listcmplboot[:, k] = np.random.choice(listcmpltrue[:, k], size=gdatfinl.numbsampboot)
                                        listfdisboot[:, k] = np.random.choice(listfdistrue[:, k], size=gdatfinl.numbsampboot)
                                        listhistboot[:, k] = np.random.choice(listhist[:, k], size=gdatfinl.numbsampboot)
                                else:
                                    listcmplboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
                                    listfdisboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
                                    listhistboot = np.empty((gdatfinl.numbsampboot, gdatfinl.numbbinsplot, gdatfinl.numbbinsplot))
                                    for a in gdatfinl.indxbinsplot:
                                        for b in gdatfinl.indxbinsplot:
                                            listcmplboot[:, a, b] = np.random.choice(listcmpltrue[:, a, b], size=gdatfinl.numbsampboot)
                                            listfdisboot[:, a, b] = np.random.choice(listfdistrue[:, a, b], size=gdatfinl.numbsampboot)
                                            listhistboot[:, a, b] = np.random.choice(listhist[:, a, b], size=gdatfinl.numbsampboot)
                                # sentinel values mark invalid (-1) or empty (0) completeness bins
                                indxbadd = np.where(listcmplboot == -1)
                                indxbaddzero = np.where(listcmplboot == 0.)
                                listhistincr = listhistboot / listcmplboot * (1. - listfdisboot)
                                listhistincr[indxbadd] = -1.5
                                listhistincr[indxbaddzero] = 1.5

                            listgdatmodi[0].liststrgchan += ['incr' + gmod.namepara.genrelemvarbhist[4][qq]]
                            setattr(gdatfinl, 'listpostincr' + gmod.namepara.genrelemvarbhist[4][qq], listhistincr)

                            ## external correction
                            for q in gdatfinl.indxrefr:
                                nametemp = gmod.namepara.genrelemvarbhist[1]
                                if len(gmod.namepara.genrelemvarbhist[2]) > 0:
                                    nametemp += gmod.namepara.genrelemvarbhist[2]
                                nametemp += 'pop%dpop%dpop%d' % (q, qq, l)
                                crexhist = getattr(gdatfinl, 'crex' + nametemp)
                                if crexhist is not None:
                                    listhistexcr = listhistincr * crexhist
                                    if crexhist.ndim == 1 and listhistincr.ndim == 3:
                                        raise Exception('')
                                    listgdatmodi[0].liststrgchan += ['excr' + nametemp]
                                    setattr(gdatfinl, 'listpostexcr' + nametemp, listhistexcr)

        # compute credible intervals
        if gdatfinl.typeverb > 0:
            print('Computing credible intervals...')
            timeinit = gdatfinl.functime()

        for strgchan in listgdatmodi[0].liststrgchan:

            if booltile:
                # in tiling mode only histogram/correction channels are summarized
                if strgchan in gdatfinl.liststrgvarbarryswep or strgchan in listgdatmodi[0].liststrgvarblistsamp:
                    continue
                if not (strgchan.startswith('hist') or strgchan.startswith('incr') or strgchan.startswith('excr')):
                    continue

            if gdatfinl.fitt.numbparaelem > 0 and strgchan in [strgvarbhist[0] for strgvarbhist in gdatfinl.liststrgvarbhist]:
                if 'spec' in strgchan:
                    continue
            if strgchan == 'spec':
                continue

            listtemp = getattr(gdatfinl, 'list' + strgpdfn + strgchan)

            if isinstance(listtemp, list):
                if booltile:
                    continue

                # ensure that transdimensional lists are not included
                # temp
                if strgchan in gdatfinl.fitt.namepara.genrelemtotl or strgchan == 'indxelemfull':
                    continue

                pctltemp = []
                pmeatemp = []
                meditemp = []
                errrtemp = []
                stdvtemp = []
                numb = len(listtemp[0])
                for k in range(numb):
                    if isinstance(listtemp[0][k], list):
                        continue
                    shap = [gdatfinl.numbsamptotl] + list(listtemp[0][k].shape)
                    temp = np.zeros(shap)
                    for n in gdatfinl.indxsamptotl:
                        temp[n, ...] = listtemp[n][k]
                    pctltempsing = tdpy.retr_pctlvarb(temp)
                    pmeatempsing = np.mean(temp, axis=0)
                    meditempsing = pctltempsing[0, ...]
                    errrtempsing = tdpy.retr_errrvarb(pctltempsing)
                    stdvtempsing = np.std(temp)
                    pctltemp.append(pctltempsing)
                    pmeatemp.append(pmeatempsing)
                    meditemp.append(meditempsing)
                    errrtemp.append(errrtempsing)
                    stdvtemp.append(stdvtempsing)
            else:
                # this is needed for finding posterior moments of features of associated reference elements
                if 'asscref' in strgchan:
                    if listtemp.ndim != 2:
                        raise Exception('')
                    pmeatemp = np.zeros(listtemp.shape[1])
                    pctltemp = np.zeros([3] + [listtemp.shape[1]])
                    # temp -- this only works for 2D listtemp
                    # NOTE(review): errrtemp/stdvtemp/meditemp are not assigned
                    # in this branch; the setattr calls below will reuse values
                    # from a previous channel (or raise on the first) -- confirm
                    for k in range(listtemp.shape[1]):
                        indxassc = np.where(np.isfinite(listtemp[:, k]))[0]
                        if indxassc.size > 0:
                            pctltemp[:, k] = tdpy.retr_pctlvarb(listtemp[indxassc, k])
                            pmeatemp[k] = np.mean(listtemp[indxassc, k])
                else:
                    pctltemp = tdpy.retr_pctlvarb(listtemp)
                    pmeatemp = np.mean(listtemp, axis=0)
                    errrtemp = tdpy.retr_errrvarb(pctltemp)
                    stdvtemp = np.std(pctltemp, axis=0)
                    meditemp = pctltemp[0, ...]

                    if strgchan in gdatfinl.listnamevarbcpct:
                        cpcttemp = np.empty([gdatfinl.numbsampcpct] + [3] + list(listtemp.shape[1:]))
                        for n in gdatfinl.indxsampcpct:
                            cpcttemp[n, ...] = tdpy.retr_pctlvarb(listtemp[:n+1, ...])

            setattr(gdatfinl, 'pctl' + strgpdfn + strgchan, pctltemp)
            setattr(gdatfinl, 'medi' + strgpdfn + strgchan, meditemp)
            setattr(gdatfinl, 'pmea' + strgpdfn + strgchan, pmeatemp)
            setattr(gdatfinl, 'errr' + strgpdfn + strgchan, errrtemp)
            setattr(gdatfinl, 'stdv' + strgpdfn + strgchan, stdvtemp)

            if strgchan in gdatfinl.listnamevarbcpct:
                setattr(gdatfinl, 'cpct' + strgpdfn + strgchan, cpcttemp)

        if not booltile:
            # moments of the total log-likelihood chain
            pmealliktotl = getattr(gdatfinl, 'pmea' + strgpdfn + 'lliktotl')
            stdvlliktotl = getattr(gdatfinl, 'stdv' + strgpdfn + 'lliktotl')
            minmlliktotl = np.amin(listlliktotl)
            maxmlliktotl = np.amax(listlliktotl)
            skewlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**3)
            kurtlliktotl = np.mean(((listlliktotl - pmealliktotl) / stdvlliktotl)**4)
            setattr(gdatfinl, 'minm' + strgpdfn + 'lliktotl', minmlliktotl)
            setattr(gdatfinl, 'maxm' + strgpdfn + 'lliktotl', maxmlliktotl)
            setattr(gdatfinl, 'skew' + strgpdfn + 'lliktotl', skewlliktotl)
            setattr(gdatfinl, 'kurt' + strgpdfn + 'lliktotl', kurtlliktotl)

            if strgpdfn == 'post':
                infopost = retr_infofromlevi(pmealliktotl, levipost)
                setattr(gdatfinl, strgpdfn + 'infopost', infopost)
            if strgpdfn == 'post' and gdatfinl.checprio:
                leviprio = getattr(gdatprio, 'prioleviprio')
                infoprio = retr_infofromlevi(pmealliktotl, leviprio)
                setattr(gdatfinl, strgpdfn + 'infoprio', infoprio)

            bcom = maxmlliktotl - pmealliktotl
            setattr(gdatfinl, strgpdfn + 'bcom', bcom)

            listnametemp = ['lliktotl']
            # NOTE(review): `gmod` is undefined here as well; confirm
            if gmod.numbparaelem > 0:
                listnametemp += ['lpripena']

            for namevarbscal in listnametemp:
                listtemp = getattr(gdatfinl, 'list' + strgpdfn + namevarbscal)
                minm = np.amin(listtemp)
                maxm = np.amax(listtemp)
                setattr(gdatfinl, 'minm' + namevarbscal, minm)
                setattr(gdatfinl, 'maxm' + namevarbscal, maxm)
                setattr(gdatfinl, 'scal' + namevarbscal, 'self')
                # NOTE(review): uses the `gdat` argument (possibly None), not
                # gdatfinl -- confirm intent
                retr_axis(gdat, namevarbscal)

        if gdatfinl.checprio:
            # binned PDFs of scalar variables and element features
            for strgvarb in gdatfinl.listnamevarbscal:
                setp_pdfnvarb(gdatfinl, strgpdfn, strgvarb, strgvarb)
            for l0 in gdatfinl.fittindxpopl:
                for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:
                    if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
                        continue
                    setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)
                    for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:
                        if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
                            continue
                        if not checstrgfeat(strgfeatfrst, strgfeatseco):
                            continue
                        setp_pdfnvarb(gdatfinl, strgpdfn, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)

            # calculate information gain
            if strgpdfn == 'post':
                for namevarbscal in gdatfinl.listnamevarbscal:
                    setp_info(gdatfinl, gdatprio, namevarbscal, namevarbscal)
                for l0 in gdatfinl.fittindxpopl:
                    for strgfeatfrst in gdatfinl.fitt.namepara.genrelem[l0]:
                        if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
                            continue
                        setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l0)
                        for strgfeatseco in gdatfinl.fitt.namepara.genrelem[l0]:
                            if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
                                continue
                            if not checstrgfeat(strgfeatfrst, strgfeatseco):
                                continue
                            setp_info(gdatfinl, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l0, nameseco=strgfeatseco)

        if gdatfinl.typeverb > 0:
            timefinl = gdatfinl.functime()
            print('Done in %.3g seconds.' % (timefinl - timeinit))

        # flatten the np.arrays which have been collected at each sweep
        #setattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'flat', getattr(gdat, 'list' + strgpdfn + strgpdfntemp + 'totl').flatten())
        if not booltile:
            # memory usage
            listmemoresi = getattr(gdatfinl, 'list' + strgpdfn + 'memoresi')
            gdatfinl.meanmemoresi = np.mean(listmemoresi, 1)
            gdatfinl.derimemoresi = (gdatfinl.meanmemoresi[-1] - gdatfinl.meanmemoresi[0]) / gdatfinl.numbswep

            gdatfinl.timerealtotl = time.time() - gdatfinl.timerealtotl
            # NOTE(review): time.clock() was removed in Python 3.8 -- this
            # raises AttributeError on modern interpreters; should migrate to
            # time.process_time() together with the matching initialization
            gdatfinl.timeproctotl = time.clock() - gdatfinl.timeproctotl
            gdatfinl.timeproctotlswep = gdatfinl.timeproctotl / gdatfinl.numbswep

            if gdatfinl.timeatcrcntpmaxm == 0.:
                gdatfinl.timeprocnorm = 0.
            else:
                gdatfinl.timeprocnorm = gdatfinl.timeproctotlswep / gdatfinl.timeatcrcntpmaxm

        # write the final gdat object
        path = gdatfinl.pathoutprtag + 'gdatfinl' + strgpdfn

        if gdatfinl.typeverb > 0:
            print('Writing gdatfinl to %s...' % path)
        writfile(gdatfinl, path)

        # record completion in the status file
        filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')
        filestat.write('gdatfinl%s written.\n' % strgpdfn)
        filestat.close()

        if not booltile:
            if gdatfinl.typeverb > 0:
                for k in gdatfinl.indxproc:
                    print('Process %d has been completed in %d real seconds, %d CPU seconds.' % (k, gdatfinl.timereal[k], gdatfinl.timeproc[k]))
                print('Parent process has run in %d real seconds, %d CPU seconds.' % (gdatfinl.timerealtotl, gdatfinl.timeproctotl))

    print('HACKING!!')
    gdatfinl.strgpdfn = 'post'

    # produce the final plots unless they already exist
    print('Checking whether post-processing plots already exist.')
    booltemp = chec_statfile(pathpcat, rtagfinl, 'plotfinl')
    if booltemp:
        print('Final plots already exist. Skipping...')
    else:
        if strgpdfn == 'post' and gdatfinl.checprio:
            path = pathoutprtag + 'gdatfinlprio'
            gdatprio = readfile(path)
        else:
            gdatprio = None

        if gdatfinl.makeplot and getattr(gdatfinl, 'makeplotfinl' + strgpdfn) or forcplot:
            plot_finl(gdatfinl, gdatprio=gdatprio, strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)
            filestat = open(gdatfinl.pathoutprtag + 'stat.txt', 'a')
            filestat.write('plotfinl%s written.\n' % strgpdfn)
            filestat.close()
def retr_listgdat(listrtag, typegdat='finlpost'):
    """Load the stored global-data object of the given type for every run tag.

    Reads 'gdat<typegdat>' from each run tag's output directory and returns
    the objects in the same order as listrtag.
    """
    return [readfile(retr_pathoutprtag(pathpcat, rtag) + 'gdat%s' % typegdat)
                                                        for rtag in listrtag]
def make_fold(gdat):
    """Set up and create the plot directory tree for a run.

    For every sampling density in ``gdat.liststrgpdfn`` this defines a family
    of ``path...`` attributes on ``gdat`` (final/frame/animation/optimization
    folders, per-proposal-type folders, per-phase folders), sets
    ``gdat.pathinfo``, and finally creates on disk every directory named by an
    attribute starting with 'path'.

    Parameters
    ----------
    gdat : global-data object
        Must provide ``liststrgpdfn``, ``pathplotrtag``, ``indxproptype``,
        ``nameproptype``, ``liststrgphas`` and a ``liststrgfold<phas>``
        attribute for each phase (including 'init').
    """
    for strgpdfn in gdat.liststrgpdfn:
        setattr(gdat, 'path' + strgpdfn, gdat.pathplotrtag + strgpdfn + '/')
        path = getattr(gdat, 'path' + strgpdfn)

        for nameseco in ['finl', 'fram', 'anim', 'opti']:
            setattr(gdat, 'path' + strgpdfn + nameseco, path + nameseco + '/')

        for nameseco in ['diag', 'lpac', 'varbscal', 'cond', 'varbscalproc']:
            setattr(gdat, 'path' + strgpdfn + 'finl' + nameseco, path + 'finl/' + nameseco + '/')

        for n in gdat.indxproptype:
            setattr(gdat, 'path' + strgpdfn + 'finl' + gdat.nameproptype[n], path + 'finl/lpac/' + gdat.nameproptype[n] + '/')

        for namethrd in ['hist', 'trac', 'join', 'cova']:
            setattr(gdat, 'path' + strgpdfn + 'finlvarbscal' + namethrd, path + 'finl/varbscal/' + namethrd + '/')

        for strgphas in gdat.liststrgphas + ['init']:
            liststrgfold = getattr(gdat, 'liststrgfold' + strgphas)
            for nameseco in liststrgfold:
                if strgphas == 'init':
                    # association/completeness/false-discovery folders are not
                    # meaningful for the initial phase
                    if nameseco == 'assc' or nameseco.startswith('cmpl') or nameseco.startswith('fdis'):
                        continue
                    setattr(gdat, 'path' + strgphas + nameseco[:-1], gdat.pathplotrtag + 'init/' + nameseco)
                else:
                    setattr(gdat, 'path' + strgpdfn + strgphas + nameseco[:-1], path + strgphas + '/' + nameseco)

    gdat.pathinfo = gdat.pathplotrtag + 'info/'

    ## make the directories
    for attr, valu in gdat.__dict__.items():
        if attr.startswith('path'):
            # use os.makedirs instead of shelling out to `mkdir -p`: no shell
            # involved (safe with spaces/metacharacters in paths) and failures
            # raise instead of being silently ignored
            os.makedirs(valu, exist_ok=True)
def make_cmapdivg(strgcolrloww, strgcolrhigh):
    """Build a diverging colormap between two named colors.

    The map runs from strgcolrloww through white (anchored at 0.5) up to
    strgcolrhigh.
    """
    funccolr = mpl.colors.ColorConverter().to_rgb
    listanch = [
        funccolr(strgcolrloww),
        funccolr('white'),
        0.5,
        funccolr('white'),
        funccolr(strgcolrhigh),
    ]
    return make_cmap(listanch)
def make_cmap(seq):
    """Build a LinearSegmentedColormap from an alternating sequence.

    ``seq`` alternates RGB tuples and float anchors in (0, 1); each float is
    interpreted as the boundary between the color preceding it and the color
    following it. Endpoints at 0.0 and 1.0 are added automatically.
    """
    seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for indx, item in enumerate(seq):
        if not isinstance(item, float):
            continue
        # the colors flanking the anchor define the segment boundary
        r1, g1, b1 = seq[indx - 1]
        r2, g2, b2 = seq[indx + 1]
        cdict['red'].append([item, r1, r2])
        cdict['green'].append([item, g1, g2])
        cdict['blue'].append([item, b1, b2])
    return mpl.colors.LinearSegmentedColormap('CustomMap', cdict)
def setp_pdfnvarb(gdat, strgpdfn, name, namefull, nameseco=None):
    '''
    Histogram the chain samples of a variable into a normalized probability
    density and store it on gdat as 'pdfn' + strgpdfn + namefull.
    '''
    # NOTE(review): 'listvarb' is never defined in this scope -- it is neither a
    # parameter nor assigned before use, so calling this function raises
    # NameError. Presumably it should be fetched from gdat via getattr using
    # strgpdfn/namefull -- TODO confirm against callers.
    if listvarb.ndim == 1:
        shaptemp = [gdat.numbbinspdfn, 1]
    else:
        shaptemp = [gdat.numbbinspdfn] + list(listvarb.shape[1:])
    pdfn = np.empty(shaptemp)
    if listvarb.ndim == 1:
        # scalar variable: use the precomputed bins and bin widths for 'name'
        binsvarb = getattr(gdat.binspara, name)
        deltvarb = getattr(gdat, 'delt' + name)
        pdfn[:, 0] = np.histogram(listvarb, bins=binsvarb)[0].astype(float)
        pdfn[:, 0] /= np.sum(pdfn[:, 0])
        pdfn[:, 0] /= deltvarb
    else:
        # NOTE(review): 'gmod' is also undefined here -- presumably a model
        # object fetched from gdat; confirm before use.
        binsvarb = np.linspace(0, gmod.maxmpara.numbelemtotl, 51)
        if listvarb.ndim == 2:
            for k in range(listvarb.shape[1]):
                pdfn[:, k] = np.histogram(listvarb[:, k], bins=binsvarb)[0].astype(float)
                pdfn[:, k] /= np.sum(pdfn[:, k])
            # 50 bins over the unit-normalized range -> density normalization
            pdfn *= 50.
        if listvarb.ndim == 3:
            for k in range(listvarb.shape[1]):
                for m in range(listvarb.shape[2]):
                    pdfn[:, k, m] = np.histogram(listvarb[:, k, m], bins=binsvarb)[0].astype(float)
                    pdfn[:, k, m] /= np.sum(pdfn[:, k, m])
            # 50^2 for the two-dimensional case
            pdfn *= 2500.
    # floor the density so downstream logarithms stay finite
    pdfn[np.where(pdfn < 1e-50)[0]] = 1e-50
    setattr(gdat, 'pdfn' + strgpdfn + namefull, pdfn)
def setp_info(gdat, gdatprio, name, namefull, nameseco=None, namesecofull=None):
    '''
    Compute the prior-to-posterior information gain (KL divergence) and the
    KS-test p-value for a variable, and store 'pvks', 'infodens' and 'info'
    attributes on gdat.

    gdat holds the posterior samples/density, gdatprio the prior ones.
    '''
    listpost = getattr(gdat, 'listpost' + namefull)
    listprio = getattr(gdatprio, 'listprio' + namefull)
    pdfnpost = getattr(gdat, 'pdfnpost' + namefull)
    pdfnprio = getattr(gdatprio, 'pdfnprio' + namefull)
    # allocate outputs according to the dimensionality of the sample list
    if listpost.ndim == 3:
        infodens = np.empty((gdat.numbbinspdfn, listpost.shape[1], listpost.shape[2]))
        info = np.empty((listpost.shape[1], listpost.shape[2]))
        pvks = np.empty((listpost.shape[1], listpost.shape[2]))
    else:
        if listpost.ndim == 1:
            numbtemp = 1
        else:
            numbtemp = listpost.shape[1]
        infodens = np.empty((gdat.numbbinspdfn, numbtemp))
        info = np.empty(numbtemp)
        pvks = np.empty(numbtemp)
    if listpost.ndim == 1:
        # promote scalars to a single-column 2D array so one code path serves both
        listpost = listpost[:, None]
        listprio = listprio[:, None]
        deltvarb = getattr(gdat, 'delt' + name)
    else:
        if listpost.ndim == 2:
            deltvarb = 1. / 50
        else:
            # bin width for the 2D density (50 bins per axis); the original
            # read '1. / 50**list2', a NameError -- 'list2' was a typo for 2,
            # matching the 50**2 (=2500) normalization used in setp_pdfnvarb
            deltvarb = 1. / 50**2
    if listpost.ndim == 1 or listpost.ndim == 2:
        for k in range(listpost.shape[1]):
            infodens[:, k] = retr_infodens(pdfnpost[:, k], pdfnprio[:, k])
            info[k] = np.sum(infodens[:, k] * deltvarb)
            temp, pvks[k] = sp.stats.ks_2samp(listpost[:, k], listprio[:, k])
    if listpost.ndim == 3:
        for k in range(listpost.shape[1]):
            for m in range(listpost.shape[2]):
                infodens[:, k, m] = retr_infodens(pdfnpost[:, k, m], pdfnprio[:, k, m])
                info[k, m] = np.sum(infodens[:, k, m] * deltvarb)
                temp, pvks[k, m] = sp.stats.ks_2samp(listpost[:, k, m], listprio[:, k, m])
    setattr(gdat, 'pvks' + namefull, pvks)
    setattr(gdat, 'infodens' + namefull, infodens)
    setattr(gdat, 'info' + namefull, info)
# check the state file
def chec_statfile(pathpcat, rtag, strggdat, typeverb=1):
    '''
    Check the status file of run *rtag* for a line announcing that the global
    object *strggdat* has been written.

    Returns True when the announcement line is present, False when the status
    file is missing or does not contain it.
    '''
    print('Checking the state file %s for %s...' % (strggdat, rtag))
    pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
    # the status file must exist at all
    if not os.path.isfile(pathoutprtag + 'stat.txt'):
        if typeverb > 0:
            print('pathoutprtag')
            print(pathoutprtag)
            print('stat.txt not found.')
        return False
    # scan for the exact announcement line; the context manager guarantees the
    # handle is closed even if iteration raises (the original leaked it then)
    linesrch = strggdat + ' written.\n'
    with open(pathoutprtag + 'stat.txt', 'r') as filestat:
        for line in filestat:
            if line == linesrch:
                return True
    if typeverb > 0:
        print('bad %s status.' % (strggdat))
    return False
def retr_los3(dlos, lgal, bgal):
    '''
    Convert a heliocentric line-of-sight distance and Galactic angles into
    Galactocentric spherical coordinates (radius, polar angle, azimuth).
    '''
    # Sun-Galactic-center distance [pc]
    distsunn = 8.5e3
    # law of cosines for the Galactocentric radius
    dglc = np.sqrt(distsunn**2 + dlos**2 - 2. * dlos * distsunn * np.cos(bgal) * np.cos(lgal))
    # polar angle from the Galactic pole
    thet = np.arccos(np.sin(bgal) * dlos / dglc)
    # azimuthal angle in the Galactic plane
    phii = np.arcsin(np.sqrt(np.cos(bgal)**2 * dlos**2 + distsunn**2 - 2 * dlos * np.cos(bgal) * distsunn) / dglc)
    return dglc, thet, phii
def retr_glc3(dglc, thet, phii):
    '''
    Inverse of retr_los3: map Galactocentric spherical coordinates to the
    heliocentric distance and Galactic angles.
    '''
    # Cartesian position relative to the Galactic center
    sinthet = np.sin(thet)
    xpos = dglc * sinthet * np.cos(phii)
    ypos = dglc * sinthet * np.sin(phii)
    zpos = dglc * np.cos(thet)
    # shift the origin to the Sun (8.5 kpc along the y axis)
    dlos = np.sqrt(zpos**2 + xpos**2 + (8.5e3 - ypos)**2)
    lgal = np.arctan2(8.5e3 - ypos, xpos) - np.pi / 2
    bgal = np.arcsin(zpos / dlos)
    return dlos, lgal, bgal
def retr_lumipuls(geff, magf, per0):
    '''
    Return the (bolometric) spin-down luminosity of a pulsar [erg/s] given the
    gamma-ray efficiency geff, surface magnetic field magf [G] and spin period
    per0 [s], normalized to geff=0.2, B=10^8.5 G, P=3 ms.
    '''
    # temp -- this is bolometric luminosity np.whereas dictelem[l]['flux'] is differential!
    # magnetic-dipole spin-down scales as B^2 / P^4; the original '*4' (a plain
    # factor of four on the period ratio) was a typo for the fourth power
    lumi = 9.6e33 * (geff / 0.2) * (magf / 10**8.5)**2 * (3e-3 / per0)**4
    return lumi
def retr_lumi(gdat, flux, dlos, reds=None):
    '''
    Convert a flux at distance dlos [pc] into a luminosity, using the
    parsec-to-cm and erg-to-GeV conversion constants stored on gdat; when a
    redshift is supplied, apply a (1 + z)^2 correction.
    '''
    lumi = flux * 4. * np.pi * dlos**2 * gdat.prsccmtr**2 / gdat.ergsgevv
    if reds is None:
        return lumi
    # temp
    # redshift correction
    return lumi * (1. + reds)**2
def retr_flux(gdat, lumi, dlos, reds=None):
    '''
    Convert a luminosity into a flux at distance dlos [pc]; exact inverse of
    retr_lumi, including the optional (1 + z)^2 redshift correction.
    '''
    flux = lumi / 4. / np.pi / dlos**2 / gdat.prsccmtr**2 * gdat.ergsgevv
    # redshift correction -- the original branch was an empty 'pass' placeholder;
    # implemented here as the inverse of the (1 + z)^2 factor in retr_lumi so the
    # two conversions round-trip
    if reds is not None:
        flux /= (1. + reds)**2
    return flux
def retr_per1(per0, magf):
    '''
    Return the spin-down period derivative of a pulsar given its period per0 [s]
    and surface magnetic field magf [G], normalized to B=10^8.5 G, P=3 ms.
    '''
    ratimagf = magf / 10**8.5
    ratiper0 = 3e-3 / per0
    return 3.3e-20 * ratimagf**2 * ratiper0
def retr_dlosgalx(lgal, bgal, dglc):
    '''
    Crude line-of-sight distance from a Galactocentric radius; ignores the
    Galactic angles entirely.
    '''
    # temp -- this is obviously wrong (original author's note; behavior preserved)
    distsunn = 8.5e3
    return distsunn - dglc
def retr_arryfromlist(listtemp):
    '''
    Stack a list of equal-shape numpy arrays into a single (float) array with a
    new leading axis indexing the list.
    '''
    arry = np.empty([len(listtemp)] + list(listtemp[0].shape))
    for indx, elem in enumerate(listtemp):
        arry[indx, ...] = elem
    return arry
def proc_cntpdata(gdat):
    '''
    Post-process the data count maps: compute the variance and likelihood
    offset, spatial averages, plotting limits, per-pixel count histograms and,
    for Cartesian pixelizations, the Cartesian version of the count map.
    All results are stored as attributes on gdat.
    '''
    # exclude voxels with vanishing exposure
    ## data counts
    if gdat.typedata == 'inpt':
        # for non-'inpt' data, gdat.cntpdata is assumed to exist already
        gdat.cntpdata = retr_cntp(gdat, gdat.sbrtdata)
    # data variance
    gdat.varidata = np.maximum(gdat.cntpdata, 1.)
    # correct the likelihoods for the constant data dependent factorial
    gdat.llikoffs = -sp.special.gammaln(gdat.cntpdata + 1)
    ## spatial average
    gdat.sbrtdatamean, gdat.sbrtdatastdv = retr_spatmean(gdat, gdat.cntpdata, boolcntp=True)
    # data count limits
    minmcntpdata = np.amin(gdat.cntpdata)
    maxmcntpdata = np.amax(gdat.cntpdata)
    minm = minmcntpdata
    maxm = maxmcntpdata
    setp_varb(gdat, 'cntpdata', minm=minm, maxm=maxm, lablroot='$C_{D}$', scal='asnh', strgmodl='plot')
    # model-count limits extend one decade below the data minimum
    maxm = maxmcntpdata
    minm = 1e-1 * minmcntpdata
    for strgmodl in gdat.liststrgmodl:
        gmod = getattr(gdat, strgmodl)
        setp_varb(gdat, 'cntpmodl', minm=minm, maxm=maxm, strgmodl=strgmodl, scal='asnh')
    # NOTE(review): debug prints left in; consider removing
    print('gdat.labltickmajrpara.cntpmodl')
    print(gdat.labltickmajrpara.cntpmodl)
    # residual limits
    maxm = np.ceil(maxmcntpdata * 0.1)
    minm = -np.ceil(maxmcntpdata * 0.1)
    setp_varb(gdat, 'cntpresi', minm=minm, maxm=maxm, lablroot='$C_{R}$', scal='asnh', strgmodl='plot')
    # 1-point function of the data counts
    for m in gdat.indxevtt:
        if gdat.numbpixl > 1:
            for i in gdat.indxener:
                # NOTE(review): debug prints left in; consider removing
                print('gdat.cntpdata[i, :, m]')
                summgene(gdat.cntpdata[i, :, m])
                print('gdat.binspara.cntpdata')
                summgene(gdat.binspara.cntpdata)
                histcntp = np.histogram(gdat.cntpdata[i, :, m], bins=gdat.binspara.cntpdata)[0]
                setattr(gdat, 'histcntpdataen%02devt%d' % (i, m), histcntp)
        else:
            # single-pixel case: histogram over the lone spatial bin
            histcntp = np.histogram(gdat.cntpdata[:, 0, m], bins=gdat.binspara.cntpdata)[0]
            setattr(gdat, 'histcntpdataevt%d' % m, histcntp)
    # obtain cartesian versions of the maps
    if gdat.typepixl == 'cart':
        ## data counts
        gdat.cntpdatacart = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))
        # scatter the region-of-interest pixels back into the full Cartesian grid
        gdat.cntpdatacart[:, gdat.indxpixlrofi, :] = gdat.cntpdata
        gdat.cntpdatacart = gdat.cntpdatacart.reshape((gdat.numbener, gdat.numbsidecart, gdat.numbsidecart, gdat.numbevtt))
def retr_infodens(pdfnpost, pdfnprio):
    '''Pointwise Kullback-Leibler integrand between posterior and prior densities.'''
    ratidens = pdfnpost / pdfnprio
    return pdfnpost * np.log(ratidens)
def retr_llik(gdat, strgmodl, cntpmodl):
    '''
    Return the pixel-wise log-likelihood of the model count map cntpmodl given
    the data counts on gdat, under the likelihood selected by gdat.liketype
    ('pois' or 'gaus').

    Raises ValueError for an unrecognized liketype (the original fell through
    to a NameError on the unset result).
    '''
    if gdat.liketype == 'pois':
        # Poisson log-likelihood up to the data-only factorial term
        llik = gdat.cntpdata * np.log(cntpmodl) - cntpmodl
    elif gdat.liketype == 'gaus':
        llik = -0.5 * (gdat.cntpdata - cntpmodl)**2 / gdat.varidata
    else:
        raise ValueError('Unrecognized liketype: %s' % gdat.liketype)
    return llik
def retr_mapsgaus(gdat, lgal, bgal, spec, size, ellp, angl):
    '''
    Evaluate an elliptical Gaussian surface-brightness map centered at
    (lgal, bgal) with half-width size, ellipticity ellp and position angle angl.

    NOTE(review): this function cannot run as written and appears to be dead or
    half-ported code:
      - 'lgalgrid' / 'bgalgrid' are read but never defined (likely missing
        parameters or gdat attributes) -- TODO confirm the intended grid source;
      - 'flux' is undefined (presumably 'spec' was meant);
      - 'tensordot' is unqualified (presumably np.tensordot);
      - 'self.icovmatr' references 'self' in a free function (presumably the
        local 'icovmatr' computed below);
      - 'rttrmatr' is computed but never used.
    Fix and verify against callers before use.
    '''
    # rotation matrix for the position angle (currently unused, see NOTE above)
    rttrmatr = np.array([[np.cos(angl), -np.sin(angl)], [np.sin(angl), np.cos(angl)]])
    # inverse covariance of the elliptical Gaussian in its principal frame
    icovmatr = np.array([[1. / ((1. - ellp) * size)**2, 0.], [0., 1. / size**2]])
    posi = np.array([lgalgrid - lgal, bgalgrid - bgal])
    mapsgaus = flux * np.exp(-0.5 * np.sum(posi * tensordot(self.icovmatr, posi, (1,0)), 0)) / size**2 / (1. - ellp)
    return mapsgaus
def retr_sbrtsers(gdat, lgalgrid, bgalgrid, lgal, bgal, spec, size, ellp, angl, seri=np.array([4.])):
    '''
    Evaluate the surface brightness of an elliptical Sersic profile on the
    (lgalgrid, bgalgrid) grid, either by interpolating the precomputed
    pixel-convolved table (gdat.typesers == 'intp') or by direct evaluation of
    the de Vaucouleurs form (gdat.typesers == 'vauc').
    '''
    # offsets from the profile center
    difflgal = lgalgrid - lgal
    diffbgal = bgalgrid - bgal
    # rotate into the ellipse frame and compress the major axis by (1 - ellp)
    lgalrttr = (1. - ellp) * (np.cos(angl) * difflgal - np.sin(angl) * diffbgal)
    bgalrttr = np.sin(angl) * difflgal + np.cos(angl) * diffbgal
    anglrttr = np.sqrt(lgalrttr**2 + bgalrttr**2)
    # interpolate pixel-convolved Sersic surface brightness
    if gdat.typesers == 'intp':
        inpt = np.empty(list(anglrttr.shape) + [3])
        inpt[..., 0] = anglrttr
        inpt[..., 1] = size
        inpt[..., 2] = seri
        sbrtsers = spec[:, None, None] * sp.interpolate.interpn((gdat.binspara.lgalsers, gdat.binspara.halfsers, gdat.binspara.indxsers), gdat.sersprof, inpt)[None, :, None]
    # evaluate directly de Vaucouleurs
    if gdat.typesers == 'vauc':
        sbrtsers = spec[:, None, None] * retr_sbrtsersnorm(anglrttr, size)[None, :, None]
    return sbrtsers
def retr_sbrtsersnorm(angl, halfsers, indxsers=4.):
    '''
    Sersic surface-brightness profile for a unit-flux source, evaluated at
    radius angl for half-light radius halfsers and Sersic index indxsers.
    '''
    ## this approximation works for 0.5 < indx < 10
    factsers = 1.9992 * indxsers - 0.3271
    ## surface brightness profile at the half-light radius for a 1 erg cm^-2 s^-1 A^-1 source
    if indxsers == 4.:
        # de Vaucouleurs special case
        sbrthalf = 1. / 7.2 / np.pi / halfsers**2
    else:
        sbrthalf = 1. / 2. / np.pi / np.exp(factsers) * factsers**(2 * indxsers) / indxsers / sp.special.gamma(2. * indxsers) / halfsers**2
    ## radial fall-off of the profile
    return sbrthalf * np.exp(-factsers * ((angl / halfsers)**(1. / indxsers) - 1.))
def copytdgu(varb):
    '''Deep copy helper: np.copy for numpy arrays, copy.deepcopy for everything else.'''
    if isinstance(varb, np.ndarray):
        return np.copy(varb)
    return deepcopy(varb)
def proc_anim(rtag):
    '''
    Assemble GIF animations out of the per-sweep frame PDFs of run *rtag*
    using ImageMagick's `convert`, then record completion in the run's status
    file.

    NOTE(review): relies on the module-level `pathpcat` and the external
    `convert` executable being available on PATH -- confirm in deployment.
    '''
    pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
    print('Making animations of frame plots for %s...' % rtag)
    # recover the run's global object to learn the folder layout
    path = pathoutprtag + 'gdatinit'
    gdat = readfile(path)
    for strgpdfn in gdat.liststrgpdfn:
        for nameextn in gdat.liststrgfoldanim:
            pathframextn = gdat.pathimag + rtag + '/' + strgpdfn + '/fram/' + nameextn
            pathanimextn = gdat.pathimag + rtag + '/' + strgpdfn + '/anim/' + nameextn
            try:
                listfile = fnmatch.filter(os.listdir(pathframextn), '*_swep*.pdf')
            except:
                # frame folder missing or unreadable -- skip this category
                print('%s failed.' % pathframextn)
                continue
            # collect the distinct plot-name prefixes present in the folder
            listfiletemp = []
            for thisfile in listfile:
                listfiletemp.extend((thisfile.split('_')[0]).rsplit('/', 1))
            listname = list(set(listfiletemp))
            if len(listname) == 0:
                continue
            # randomize processing order across plot names
            shuffle(listname)
            for name in listname:
                strgtemp = '%s*_swep*.pdf' % name
                listfile = fnmatch.filter(os.listdir(pathframextn), strgtemp)
                numbfile = len(listfile)
                # distinct suffix variants of this plot name (e.g. energy/evtt tags)
                liststrgextn = []
                for k in range(numbfile):
                    liststrgextn.append((listfile[k].split(name)[1]).split('_')[0])
                liststrgextn = list(set(liststrgextn))
                for k in range(len(liststrgextn)):
                    listfile = fnmatch.filter(os.listdir(pathframextn), name + liststrgextn[k] + '_swep*.pdf')
                    numbfile = len(listfile)
                    indxfilelowr = 0
                    if indxfilelowr < numbfile:
                        indxfileanim = np.arange(indxfilelowr, numbfile)
                    else:
                        continue
                    # random frame ordering for the animation
                    indxfileanim = np.random.choice(indxfileanim, replace=False, size=indxfileanim.size)
                    cmnd = 'convert -delay 20 -density 300 -quality 100 '
                    for n in range(indxfileanim.size):
                        cmnd += '%s%s ' % (pathframextn, listfile[indxfileanim[n]])
                    namegiff = '%s%s.gif' % (pathanimextn, name + liststrgextn[k])
                    cmnd += ' ' + namegiff
                    print('Processing %s' % namegiff)
                    if not os.path.exists(namegiff):
                        print('Run: %s, pdf: %s' % (rtag, strgpdfn))
                        print('Making %s animation...' % name)
                        os.system(cmnd)
                    else:
                        print('GIF already exists.')
                        pass
    # mark the animation stage as complete in the status file
    pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
    filestat = open(pathoutprtag + 'stat.txt', 'a')
    filestat.write('animfinl written.\n')
    filestat.close()
def plot_samp(gdat, gdatmodi, strgstat, strgmodl, strgphas, strgpdfn='post', gdatmock=None, booltile=False):
    '''
    Master plotting routine for a model state: produces count maps, residuals,
    element-parameter histograms and correlations, spectra, lensing
    diagnostics, PSF profiles, and deflection fields for the given state
    (strgstat), model (strgmodl) and phase (strgphas).

    NOTE(review): SOURCE indentation was lost; the nesting below is
    reconstructed from the control-flow semantics -- verify against the
    repository original.
    '''
    gmod = getattr(gdat, strgmodl)
    gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
    gmodstat = getattr(gdatobjt, strgstat)
    if not booltile:
        if strgstat != 'pdfn':
            # NOTE(review): local 'numbelem' is built but never used; the next
            # line indexes gmodstat.numbelem instead -- possibly this line was
            # meant to initialize gmodstat.numbelem. TODO confirm.
            numbelem = [[] for l in gmod.indxpopl]
            for l in gmod.indxpopl:
                gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
    # suffix stamping the sweep counter onto frame-plot file names
    if gdatmodi is not None:
        strgswep = '_%09d' % gdatmodi.cntrswep
    else:
        strgswep = ''
    if not booltile:
        # data count maps
        if gdat.numbpixl > 1:
            for i in gdat.indxener:
                for m in gdat.indxevtt:
                    # NOTE(review): '/' is true division here, so the comparison
                    # of an int index against a float center only matches for
                    # even numbener/numbevtt -- TODO confirm '//' was intended
                    if gdat.boolmakeframcent and (i != gdat.numbener / 2 or m != gdat.numbevtt / 2):
                        continue
                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpdata', i, m)
            ## residual count maps
            for i in gdat.indxener:
                for m in gdat.indxevtt:
                    if gdat.boolmakeframcent and (i != gdat.numbener / 2 or m != gdat.numbevtt / 2):
                        continue
                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpresi', i, m)
        if gdat.numbpixl > 1:
            if gmod.numbparaelem > 0:
                if gmod.boolelemlens:
                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelem', booltdim=True)
        # temp -- restrict other plots to indxmodlelemcomp
        if gdat.boolbinsener:
            for specconvunit in gdat.listspecconvunit:
                if not gmod.boolbfun:
                    plot_sbrt(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, specconvunit)
        if gmod.boolapplpsfn:
            plot_psfn(gdat, gdatmodi, strgstat, strgmodl)
        setp_indxswepsave(gdat)
    if gmod.numbparaelem > 0:
        # element parameter histograms
        if not (strgmodl == 'true' and gdat.typedata == 'inpt'):
            limtydat = gdat.limtydathistfeat
            for l in gmod.indxpopl:
                strgindxydat = 'pop%d' % l
                for nameparaderielemodim in gmod.namepara.derielemodim[l]:
                    if not (nameparaderielemodim == 'flux' or nameparaderielemodim == 'mcut' or \
                                        nameparaderielemodim == 'deltllik' or nameparaderielemodim == 'defs' or nameparaderielemodim == 'nobj'):
                        continue
                    if gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt':
                        continue
                    indxydat = [l, slice(None)]
                    name = nameparaderielemodim
                    namepopl = nameparaderielemodim + 'pop%d' % l
                    lablxdat = getattr(gmod.labltotlpara, namepopl)
                    scalxdat = getattr(gmod.scalpara, namepopl)
                    limtxdat = getattr(gmod.limtpara, namepopl)
                    meanxdat = getattr(gdat.meanpara, name)
                    if gdat.numbpixl > 1:
                        listydattype = ['totl', 'sden']
                    else:
                        listydattype = ['totl']
                    for ydattype in listydattype:
                        ## plot the surface density of elements
                        if ydattype == 'sden':
                            # plot the surface density of elements only for the amplitude feature
                            if nameparaderielemodim != gmod.nameparagenrelemampl:
                                continue
                            if gdat.sdenunit == 'degr':
                                lablydat = r'$\Sigma_{%s}$ [deg$^{-2}$]' % gmod.lablelemextn[l]
                            if gdat.sdenunit == 'ster':
                                lablydat = r'$\Sigma_{%s}$ [sr$^{-2}$]' % gmod.lablelemextn[l]
                        ## plot the total number of elements
                        if ydattype == 'totl':
                            lablydat = r'$N_{%s}$' % gmod.lablelemextn[l]
                        if ydattype == 'totl' and not gdat.rtagmock is None:
                            listtypehist = ['hist', 'histcorrreca']
                        else:
                            listtypehist = ['hist']
                        boolhistprio = not booltile
                        for typehist in listtypehist:
                            if typehist == 'histcorrreca':
                                if gmod.numbparaelem == 0 or gdat.priofactdoff == 0.:
                                    continue
                                if nameparaderielemodim == 'specplot' or nameparaderielemodim == 'spec' or nameparaderielemodim == 'deflprof':
                                    continue
                                if not nameparaderielemodim in gmod.namepara.genrelem[l]:
                                    continue
                            plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'hist' + nameparaderielemodim + 'pop%d' % l, \
                                              'mean' + nameparaderielemodim, scalydat='logt', lablxdat=lablxdat, \
                                              lablydat=lablydat, histodim=True, ydattype=ydattype, \
                                              scalxdat=scalxdat, meanxdat=meanxdat, limtydat=limtydat, \
                                              limtxdat=limtxdat, boolhistprio=boolhistprio, \
                                              #indxydat=indxydat, strgindxydat=strgindxydat, \
                                              nameinte='histodim/', typehist=typehist)
    if not booltile:
        if gmod.numbparaelem > 0:
            # element parameter correlations
            for l in gmod.indxpopl:
                if strgmodl != 'true' and gdat.boolinforefr and gdat.boolasscrefr:
                    for strgfeat in gmod.namepara.derielemodim[l]:
                        if not (strgfeat == 'flux' or strgfeat == 'mass' or strgfeat == 'deltllik' or strgfeat == 'nobj') and \
                                        (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
                            continue
                        for q in gdat.indxrefr:
                            if not l in gdat.refrindxpoplassc[q]:
                                continue
                            if gdat.refr.numbelem[q] == 0:
                                continue
                            if not strgfeat in gdat.refr.namepara.elem[q] or strgfeat in gdat.refr.namepara.elemonly[q][l]:
                                continue
                            plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat)
                            plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat, plotdiff=True)
        if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
            # plots
            for i in gdat.indxener:
                for m in gdat.indxevtt:
                    if gmod.numbpopl > 1:
                        if gmod.numbparaelem > 0:
                            for l in gmod.indxpopl:
                                plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpdata', i, m, indxpoplplot=l)
            ## histograms of the number of counts per pixel
            limtxdat = [gdat.minmpara.cntpmodl, gdat.maxmpara.cntpmodl]
            for nameecom in gmod.listnameecomtotl:
                name = 'histcntp' + nameecom
                for m in gdat.indxevtt:
                    for i in gdat.indxener:
                        # NOTE(review): 'name' keeps accumulating en/evt suffixes
                        # across loop iterations instead of being rebuilt -- likely
                        # a bug; TODO confirm intended file naming
                        if gdat.numbener > 1:
                            name += 'en%02d' % (i)
                        if gdat.numbevtt > 1:
                            name += 'evt%d' % (m)
                        plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \
                                       name, 'meancntpdata', scalydat='logt', scalxdat='logt', lablxdat=gdat.lablcnts, histodim=True, \
                                       lablydat='$N_{pix}$', limtydat=[0.5, gdat.numbener], limtxdat=limtxdat)
            ## highest amplitude element
            # temp
            if gmod.numbparaelem > 0:
                # completeness and false discovery rate
                if strgmodl != 'true' and gdat.boolasscrefr:
                    for strgclas in ['cmpl', 'fdis']:
                        nameinte = strgclas + 'odim/'
                        limtydat = [getattr(gdat, 'minm' + strgclas), getattr(gdat, 'maxm' + strgclas)]
                        for l in gmod.indxpopl:
                            for q in gdat.indxrefr:
                                if not l in gdat.refrindxpoplassc[q]:
                                    continue
                                if gdat.refr.numbelem[q] == 0 and strgclas == 'cmpl' or gmod.numbparaelem == 0 and strgclas == 'fdis':
                                    continue
                                if strgclas == 'cmpl':
                                    lablydat = getattr(gmod.lablpara, strgclas + 'pop%dpop%d' % (l, q))
                                    strgindxydat = 'pop%dpop%d' % (l, q)
                                else:
                                    lablydat = getattr(gmod.lablpara, strgclas + 'pop%dpop%d' % (q, l))
                                    strgindxydat = 'pop%dpop%d' % (q, l)
                                for strgfeat in gdat.refr.namepara.elem[q]:
                                    if strgfeat == 'etag':
                                        continue
                                    if strgclas == 'fdis' and not strgfeat in gmod.namepara.derielemodim[l]:
                                        continue
                                    if not strgfeat.startswith('spec') and not strgfeat.startswith('defl') \
                                                     and not strgfeat in gdat.refr.namepara.elemonly[q][l] and \
                                                     not (gdat.typedata == 'mock' and (strgfeat.endswith('pars') or strgfeat.endswith('nrel'))):
                                        plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgclas + strgfeat + strgindxydat, \
                                                  'mean' + strgfeat, lablxdat=lablxdat, \
                                                  lablydat=lablydat, \
                                                  #plottype='errr', \
                                                  scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \
                                                  omittrue=True, nameinte=nameinte)
            if gmod.numbparaelem > 0:
                alph = 0.1
                # output folder depends on the state being plotted
                if strgmodl == 'true':
                    pathtemp = gdat.pathinit
                else:
                    if strgstat == 'this':
                        pathtemp = gdat.pathplotrtag + strgpdfn + '/fram/'
                    elif strgstat == 'mlik':
                        pathtemp = gdat.pathplotrtag + strgpdfn + '/finl/'
                    elif strgstat == 'pdfn':
                        pathtemp = gdat.pathplotrtag + strgpdfn + '/finl/'
                colr = retr_colr(gdat, strgstat, strgmodl, indxpopl=None)
                # transdimensional element parameters projected onto the data axes
                if not (strgstat == 'pdfn' and not gdat.boolcondcatl):
                    for l in gmod.indxpopl:
                        if gmod.typeelem[l] == 'lght':
                            # PS spectra
                            if strgstat == 'pdfn':
                                specplot = [np.empty((gdat.numbenerplot, gdat.numbstkscond))]
                                for r in gdat.indxstkscond:
                                    specplot[0][:, r] = gdat.dictglob['poststkscond'][r]['specplot'][0, :]
                            listxdat = []
                            listplottype = []
                            for k in range(specplot[l].shape[-1]):
                                listxdat.append(gdat.meanpara.enerplot)
                                listplottype.append('lghtline')
                            for specconvunit in gdat.listspecconvunit:
                                listydat = []
                                for k in range(specplot[l].shape[-1]):
                                    specplottemp = specplot[l]
                                    if strgmodl == 'true':
                                        specplottemp = np.copy(specplottemp[0, :, k])
                                    else:
                                        specplottemp = np.copy(specplottemp[:, k])
                                    # convert to the requested spectral convention
                                    if specconvunit[0] == 'en01':
                                        specplottemp *= gdat.meanpara.enerplot
                                    if specconvunit[0] == 'en02':
                                        specplottemp *= gdat.meanpara.enerplot**2
                                    if specconvunit[0] == 'en03':
                                        # temp
                                        pass
                                    listydat.append(specplottemp)
                                lablydat = getattr(gmod.lablpara, 'flux' + specconvunit[0] + specconvunit[1] + 'totl')
                                strgtemp = specconvunit[0] + specconvunit[1]
                                if specconvunit[0] == 'en03':
                                    strgtemp += specconvunit[2]
                                path = pathtemp + strgstat + 'specpop%d%s%s.pdf' % (l, strgtemp, strgswep)
                                limtydat = [np.amin(gdat.minmspec), np.amax(gdat.maxmspec)]
                                tdpy.plot_gene(path, listxdat, listydat, scalxdat='logt', scalydat='logt', \
                                               lablxdat=gdat.lablenertotl, colr=colr, alph=alph, \
                                               plottype=listplottype, limtxdat=[gdat.minmener, gdat.maxmener], lablydat=lablydat, \
                                               limtydat=limtydat)
                if gmod.boollenssubh:
                    ## deflection profiles
                    if gdat.boolvariasca and gdat.boolvariacut:
                        lablxdat = gdat.labltotlpara.gang
                        if strgstat == 'pdfn':
                            deflprof = [np.empty((gdat.numbanglfull, gdat.numbstkscond))]
                            asca = [np.empty(gdat.numbstkscond)]
                            acut = [np.empty(gdat.numbstkscond)]
                            for r in gdat.indxstkscond:
                                deflprof[0][:, r] = gdat.dictglob['poststkscond'][r]['deflprof'][0, :]
                                asca[0][r] = gdat.dictglob['poststkscond'][r]['asca'][0]
                                acut[0][r] = gdat.dictglob['poststkscond'][r]['acut'][0]
                        for l in range(len(deflprof)):
                            xdat = gdat.meanpara.anglfull * gdat.anglfact
                            listydat = []
                            listvlinfrst = []
                            listvlinseco = []
                            if 'deflprof' in gmod.typeelem[l]:
                                if strgmodl == 'true':
                                    deflproftemp = deflprof[l][0, :, :]
                                else:
                                    deflproftemp = deflprof[l]
                                for k in range(deflprof[l].shape[-1]):
                                    listydat.append(deflproftemp[:, k] * gdat.anglfact)
                                    if strgmodl == 'true':
                                        ascatemp = asca[l][0, k]
                                        acuttemp = acut[l][0, k]
                                    else:
                                        ascatemp = asca[l][k]
                                        acuttemp = acut[l][k]
                                    # mark scale and cutoff radii as vertical lines
                                    listvlinfrst.append(ascatemp * gdat.anglfact)
                                    listvlinseco.append(acuttemp * gdat.anglfact)
                                beinhost = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'paragenrscalfull', strgpdfn, indxvarb=gmod.indxpara.beinhost)
                                listydat.append(xdat * 0. + gdat.anglfact * beinhost)
                                path = pathtemp + strgstat + 'deflsubhpop%d%s.pdf' % (l, strgswep)
                                limtydat = [1e-3, 1.]
                                limtxdat = [1e-3, 1.]
                                tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', \
                                               lablxdat=lablxdat, drawdiag=True, limtydat=limtydat, \
                                               limtxdat=limtxdat, colr=colr, alph=alph, lablydat=r'$\alpha$ [$^{\prime\prime}$]', \
                                               listvlinfrst=listvlinfrst, listvlinseco=listvlinseco)
                if gdat.typedata == 'mock':
                    # pulsar masses
                    for l in gmod.indxpopl:
                        if gmod.typeelem[l] == 'lghtpntspuls':
                            lablxdat = gdat.labltotlpara.gang
                            limtydat = [gdat.minmmassshel, gdat.maxmmassshel]
                            lablydat = gdat.lablmassshel
                            name = 'massshelpop%d' % l
                            plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
                                      lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
                if gmod.boollens:
                    ## radial mass budget
                    lablxdat = gdat.lablanglfromhosttotl
                    for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
                        # host mass
                        for e in gmod.indxsersfgrd:
                            strgsersfgrd = 'isf%d' % e
                            limtydat = [gdat.minmmcut, getattr(gdat, 'plotmaxmmasshost' + strgsersfgrd + strgcalcmasssubh + 'bein')]
                            lablydat = getattr(gmod.lablpara, 'masshost' + strgsersfgrd + strgcalcmasssubh + 'totl')
                            name = 'masshost%s%s' % (strgsersfgrd, strgcalcmasssubh)
                            plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
                                      lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
                        if gmod.boolelemdeflsubhanyy:
                            # subhalo masses
                            limtydat = [gdat.minmmcut, getattr(gdat, 'plotmaxmmasssubh' + strgcalcmasssubh + 'bein')]
                            lablydat = getattr(gmod.lablpara, 'masssubh' + strgcalcmasssubh + 'totl')
                            name = 'masssubh%s' % (strgcalcmasssubh)
                            plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
                                      lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
                            # subhalo mass fraction
                            limtydat = [1e-3, 0.1]
                            lablydat = getattr(gmod.lablpara, 'fracsubh' + strgcalcmasssubh + 'totl')
                            name = 'fracsubh%s' % (strgcalcmasssubh)
                            plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'meananglhalf', scalydat='logt', \
                                      lablxdat=lablxdat, lablydat=lablydat, limtydat=limtydat)
            alph = 0.1
            if gdat.boolmodipsfn and gmod.boolelempsfnanyy:
                ## PSF radial profile
                for i in gdat.indxener:
                    for m in gdat.indxevtt:
                        indxydat = [i, slice(None), m]
                        strgindxydat = 'en%02devt%d' % (i, m)
                        lablxdat = gdat.labltotlpara.gang
                        limtydat= np.array([1e-3, 1e3]) * gdat.anglfact**2
                        plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'psfn', \
                                  'binsangl', indxydat=indxydat, strgindxydat=strgindxydat, scalydat='logt', \
                                  lablxdat=lablxdat, lablydat=r'$\mathcal{P}$', limtydat=limtydat)
            # internally and externally corrected element parameter histograms
            if gdat.typedata == 'inpt' and strgstat == 'pdfn' and gdat.rtagmock is not None:
                limtydat = gdat.limtydathistfeat
                for l in gmod.indxpopl:
                    strgindxydat = 'pop%d' % l
                    for strgfeat in gmod.namepara.derielemodim[l]:
                        if strgfeat.startswith('aerr') or strgfeat == 'specplot' or strgfeat == 'spec' or strgfeat == 'deflprof':
                            continue
                        lablydat = r'$N_{%s}$' % gmod.lablelemextn[l]
                        for namecorr in ['incr', 'excr']:
                            nameinte = namecorr + 'odim/'
                            for qq in gdatmock.indxrefr:
                                if namecorr == 'excr':
                                    # external correction: needs a cross-matched reference feature
                                    if not strgfeat in gmod.namepara.extrelem[l]:
                                        continue
                                    q = gdat.listnamerefr.index(strgfeat[-4:])
                                    if getattr(gdat, 'crex' + strgfeat + 'pop%dpop%dpop%d' % (q, qq, l)) is None:
                                        continue
                                    name = namecorr + strgfeat + 'pop%dpop%dpop%d' % (q, qq, l)
                                    plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'mean' + strgfeat, scalydat='logt', lablxdat=lablxdat, \
                                              lablydat=lablydat, histodim=True, ydattype='totl', \
                                              scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \
                                              nameinte=nameinte)
                                else:
                                    if strgfeat in gmod.namepara.extrelem[l]:
                                        continue
                                    name = namecorr + strgfeat + 'pop%dpop%d' % (qq, l)
                                    plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, name, 'mean' + strgfeat, scalydat='logt', lablxdat=lablxdat, \
                                              lablydat=lablydat, histodim=True, ydattype='totl', \
                                              scalxdat=scalxdat, limtydat=limtydat, limtxdat=limtxdat, \
                                              nameinte=nameinte)
    if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
        if gmod.numbparaelem > 0:
            # element parameter correlations
            liststrgelemtdimvarb = getattr(gdat, 'liststrgelemtdimvarb' + strgphas)
            for strgelemtdimtype in gdat.liststrgelemtdimtype:
                for strgelemtdimvarb in liststrgelemtdimvarb:
                    if strgelemtdimvarb.startswith('cmpl'):
                        continue
                    for l0 in gmod.indxpopl:
                        for strgfrst in gmod.namepara.genrelem[l0]:
                            if strgfrst.startswith('spec') or strgfrst == 'specplot' or strgfrst == 'deflprof':
                                continue
                            for strgseco in gmod.namepara.genrelem[l0]:
                                if strgseco.startswith('spec') or strgseco == 'specplot' or strgseco == 'deflprof':
                                    continue
                                if not checstrgfeat(strgfrst, strgseco):
                                    continue
                                if strgelemtdimvarb.startswith('hist'):
                                    strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%d' % l0
                                    plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
                                                  l0, strgfrst + 'pop%d' % l0, \
                                                  strgseco + 'pop%d' % l0, \
                                                  strgtotl, strgpdfn=strgpdfn)
                                else:
                                    if booltile:
                                        continue
                                    if strgfrst.startswith('aerr') or strgseco.startswith('aerr'):
                                        continue
                                    if strgelemtdimvarb.startswith('fdis'):
                                        for q in gdat.indxrefr:
                                            strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%d' % (q, l0)
                                            plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
                                                          l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)
                                    elif strgelemtdimvarb.startswith('excr') or strgelemtdimvarb.startswith('incr'):
                                        for qq in gdatmock.indxrefr:
                                            if strgelemtdimvarb.startswith('excr'):
                                                for q in gdat.indxrefr:
                                                    if getattr(gdat, 'crex' + strgfrst + strgseco + 'pop%dpop%dpop%d' % (q, qq, l0)) is None:
                                                        continue
                                                    strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%dpop%d' % (q, qq, l0)
                                                    plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
                                                                  l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)
                                            else:
                                                if strgfrst[-4:] in gdat.listnamerefr and strgseco[-4:] in gdat.listnamerefr:
                                                    continue
                                                strgtotl = strgelemtdimvarb + strgfrst + strgseco + 'pop%dpop%d' % (qq, l0)
                                                plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, \
                                                              l0, strgfrst, strgseco, strgtotl, strgpdfn=strgpdfn)
            if not (gdat.typedata == 'mock' and (gmod.numbelemtotl == 0 or gmod.maxmpara.numbelemtotl == 0)):
                for q in gdat.indxrefr:
                    if strgphas == 'init' and gdat.typedata == 'mock':
                        continue
                    # NOTE(review): the two prints and the unconditional
                    # raise Exception('') below look like debugging leftovers --
                    # they make the completeness-correlation plots after them
                    # unreachable. TODO remove once confirmed.
                    print('strgpdfn')
                    print(strgpdfn)
                    raise Exception('')
                    if booltile:
                        continue
                    for l0 in gmod.indxpopl:
                        for refrstrgfrst in gdat.refr.namepara.elem[q]:
                            if refrstrgfrst == 'spec' or refrstrgfrst == 'specplot' or refrstrgfrst == 'deflprof' or refrstrgfrst == 'etag':
                                continue
                            if refrstrgfrst in gdat.refr.namepara.elemonly[q][l0]:
                                continue
                            for refrstrgseco in gdat.refr.namepara.elem[q]:
                                if refrstrgseco in gdat.refr.namepara.elemonly[q][l0]:
                                    continue
                                if refrstrgseco == 'spec' or refrstrgseco == 'specplot' or refrstrgseco == 'deflprof' or refrstrgseco == 'etag':
                                    continue
                                if not checstrgfeat(refrstrgfrst, refrstrgseco):
                                    continue
                                if refrstrgfrst.startswith('aerr') or refrstrgseco.startswith('aerr') or refrstrgfrst == 'specplot' or refrstrgseco == 'specplot':
                                    continue
                                strgtotl = 'cmpl' + refrstrgfrst + refrstrgseco + 'pop%dpop%d' % (l0, q)
                                plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, 'bind', 'cmpl', \
                                              q, refrstrgfrst + 'pop%d' % l0, refrstrgseco + 'pop%d' % l0, strgtotl, strgpdfn=strgpdfn)
    if not booltile:
        if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
            # data and model count scatter
            for m in gdat.indxevttplot:
                if gdat.numbpixl > 1:
                    for i in gdat.indxener:
                        plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, m, indxenerplot=i)
                else:
                    plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, m)
            ## spatial priors
            # temp
            if gdat.numbpixl > 1:
                if gmod.numbparaelem > 0:
                    for l in gmod.indxpopl:
                        # NOTE(review): this zip loop variable shadows the
                        # function parameter 'strgpdfn'; every plot_genemaps /
                        # plot_gene call after this loop receives the clobbered
                        # value instead of the original argument. TODO rename
                        # the loop variable.
                        for strgfeat, strgpdfn in zip(gmod.namepara.genrelemmodu[l], gmod.liststrgpdfnmodu[l]):
                            if strgpdfn == 'tmplreln':
                                plot_genemaps(gdat, gdatmodi, 'fitt', strgpdfn, 'lpdfspatpriointp', booltdim=True)
                            if strgpdfn == 'tmplgaum':
                                plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'lpdfspatpriointp', booltdim=True)
            # model count maps
            ## backgrounds
            if gdat.numbpixl > 1:
                for i in gdat.indxener:
                    for m in gdat.indxevtt:
                        for c in gmod.indxback:
                            if gmod.boolbfun:
                                continue
                            if not gmod.boolunifback[c]:
                                plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpback%04d' % c, i, m, strgcbar='cntpdata')
                ## count error
                if strgmodl != 'true':
                    if gmod.numbparaelem > 0:
                        for l in gmod.indxpopl:
                            if gmod.boolcalcerrr[l]:
                                for i in gdat.indxener:
                                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntperrr', i, -1, strgcbar='cntpresi')
                ## diffuse components
                for i in gdat.indxener:
                    for k, name in enumerate(gmod.listnamediff):
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntp%s' % (name), i, strgcbar='cntpdata')
                ## model count maps
                for i in gdat.indxener:
                    for m in gdat.indxevtt:
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpmodl', i, m, strgcbar='cntpdata')
                # likelihood
                if strgmodl != 'true':
                    for i in gdat.indxener:
                        for m in gdat.indxevtt:
                            plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'llik', i, m, strgcbar='llikmaps')
                if gmod.boollens:
                    ## lensing signal to noise
                    if strgmodl == 'true':
                        for i in gdat.indxener:
                            plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 's2nr', i, -1)
                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magn', booltdim=True)
                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'conv', booltdim=True)
                    for i in gdat.indxener:
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplens', i, strgcbar='cntpdata', booltdim=True)
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplensgradmgtd', i, strgcbar='cntpdata', booltdim=True)
            if gdat.penalpridiff:
                for i in gdat.indxener:
                    for m in gdat.indxevtt:
                        plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \
                                  'psecodimdatapntsen%02devt%d' % (i, m), 'meanmpolodim', lablxdat='$l$', lablydat='$P_{resi}(l)$', \
                                  limtydat=[1e-2, 2.], scalxdat='logt', scalydat='logt')
                        plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'psecodimdatapntsprioen%02devt%d' % (i, m), 'meanmpolodim', lablxdat='$l$', \
                                  lablydat='$P_{prio}(l)$', limtydat=[1e-2, 2.], scalxdat='logt', scalydat='logt')
            if gmod.boollens:
                indxydat = [slice(None)]
                strgindxydat = ''
                plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convpsecodim', 'meanwvecodim', lablxdat='$k$ [1/kpc]', lablydat='$P(k)$', limtydat=[1e-1, 1e2], \
                          scalxdat='logt', scalydat='logt', indxydat=indxydat, strgindxydat=strgindxydat)
                plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'histdefl', 'meandefl', \
                          scal='self', lablxdat=r'$\alpha$ [arcsec]', lablydat=r'$N_{pix}$', \
                          strgindxydat=strgindxydat, indxydat=indxydat, histodim=True)
                if gmod.numbparaelem > 0 and gmod.boolelemdeflsubhanyy:
                    indxydat = [slice(None)]
                    strgindxydat = ''
                    plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convpsecelemodim', 'meanwvecodim', lablxdat='$k$ [1/kpc]', lablydat='$P_{sub}(k)$', \
                              strgindxydat=strgindxydat, indxydat=indxydat, limtydat=[1e-5, 1e-1], scalxdat='logt', scalydat='logt')
                    plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'histdeflsubh', 'meandeflsubh', scal='self', lablxdat=r'$\alpha$ [arcsec]', \
                              strgindxydat=strgindxydat, indxydat=indxydat, lablydat=r'$N_{pix}$', histodim=True)
            if gmod.boollens:
                for i in gdat.indxener:
                    plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrd', i, -1, strgcbar='cntpdata')
                    if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrdgalx', i, -1, strgcbar='cntpdata')
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntpbgrdexts', i, -1, strgcbar='cntpdata')
                # gradient of the lens emission
                for i in gdat.indxener:
                    for m in gdat.indxevtt:
                        plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'cntplensgrad', indxenerplot=i, indxevttplot=m)
        if not (gdat.boolshrtfram and strgstat == 'this' and strgmodl == 'fitt'):
            if gmod.boollens:
                # overall deflection field
                plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, multfact=0.1)
                # deflection field due to individual lenses
                # NOTE(review): 'numbdeflsingplot' is not defined in this scope
                # (nor visible at module level here) -- confirm where it comes from
                for k in range(numbdeflsingplot):
                    if k == 0:
                        multfact = 0.1
                    elif k == 1:
                        multfact = 1.
                    elif k >= 2:
                        multfact = 10.
                    plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, indxdefl=k, multfact=multfact)
                # residual deflection field
                if strgmodl == 'fitt' and gdat.typedata == 'mock':
                    plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, nameparagenrelem='resi', multfact=100.)
                    if strgstat != 'pdfn':
                        # NOTE(review): 'numbsingcomm' is likewise undefined here
                        for k in range(numbsingcomm):
                            plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, nameparagenrelem='resi', indxdefl=k, multfact=100.)
                    if gdat.numbpixl > 1:
                        if gmod.numbparaelem > 0:
                            plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelemresi', booltdim=True)
                            plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'convelemresiperc', booltdim=True)
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magnresi', booltdim=True)
                        plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, 'magnresiperc', booltdim=True)
def dele_rtag(rtag):
    """Delete the on-disk products (output data and images) of a run tag.

    Parameters
    ----------
    rtag : str
        Run tag whose output and image directories should be removed.

    Notes
    -----
    Relies on the module-level ``pathpcat`` root path. The command is echoed
    before execution for traceability. ``rtag`` comes from internal run
    bookkeeping; ``os.system`` with 'rm -rf' would be unsafe on untrusted input.
    """
    pathdata = pathpcat + '/data/outp/'
    pathimag = pathpcat + '/imag/'
    # remove both product trees; echo each command *before* running it
    # (the original printed the second command only after executing it)
    for pathbase in [pathdata, pathimag]:
        cmnd = 'rm -rf %s%s' % (pathbase, rtag)
        print(cmnd)
        os.system(cmnd)
def plot_infopvks(gdat, gdatprio, name, namefull, nameseco=None):
    """Plot prior-vs-posterior comparison diagnostics for one variable.

    Plots the Kullback-Leibler information gain and the Kolmogorov-Smirnov
    test p-value stored on ``gdat`` under ``'info' + namefull`` and
    ``'pvks' + namefull``, as a 2D color mesh for feature pairs, or as 1D
    curves / PDF overlays otherwise.

    Parameters
    ----------
    gdat : global data object holding posterior summaries and output paths
    gdatprio : global data object holding the prior summaries
    name : base name of the variable (used for labels and binning lookups)
    namefull : full summary-attribute name (may encode the population index)
    nameseco : name of a second feature for joint (2D) histograms, or None
    """
    # precomputed summaries for this variable
    pvks = getattr(gdat, 'pvks' + namefull)
    info = getattr(gdat, 'info' + namefull)
    path = gdat.pathinfo + 'info' + namefull
    if nameseco is not None:
        # joint (two-dimensional) feature histogram; the population index is
        # encoded as the last character of namefull
        indxpoplfrst = int(namefull[-1])

        # information gain
        figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
        # NOTE(review): varbfrst, varbseco, scalfrst, scalseco, limtfrst and
        # limtseco are never assigned in this function -- this branch raises
        # NameError if executed; the bin grids/scales presumably need to be
        # looked up on gdat first. TODO confirm.
        imag = axis.pcolor(varbfrst, varbseco, info, cmap='Greys')
        plt.colorbar(imag)
        # NOTE(review): other call sites pass (gdat, strgmodl, ...); here the
        # fitting model itself is passed in the first slot with an empty
        # strgmodl -- confirm against plot_sigmcont's signature.
        plot_sigmcont(gdat.fitt, '', axis, name, indxpoplfrst, strgseco=nameseco)
        if scalfrst == 'logt':
            axis.set_xscale('log')
        if scalseco == 'logt':
            axis.set_yscale('log')
        axis.set_xlabel(getattr(gdat.labltotlpara, name))
        axis.set_ylabel(getattr(gdat.labltotlpara, nameseco))
        axis.set_xlim(limtfrst)
        axis.set_ylim(limtseco)
        plt.tight_layout()
        plt.savefig(path)
        plt.close(figr)

        # KS test p value
        pathpvkstdim = gdat.pathinfo + 'pvks' + namefull
        figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
        imag = axis.pcolor(varbfrst, varbseco, pvks, cmap='Greys')
        plt.colorbar(imag)
        plot_sigmcont(gdat.fitt, '', axis, name, indxpoplfrst, strgseco=nameseco)
        if scalfrst == 'logt':
            axis.set_xscale('log')
        if scalseco == 'logt':
            axis.set_yscale('log')
        axis.set_xlabel(getattr(gdat.labltotlpara, name))
        axis.set_ylabel(getattr(gdat.labltotlpara, nameseco))
        axis.set_xlim(limtfrst)
        axis.set_ylim(limtseco)
        plt.tight_layout()
        plt.savefig(pathpvkstdim)
        plt.close(figr)
    elif name != namefull:
        # one-dimensional histogram of a single element feature
        lablydat = '$D_{KL}$'
        # NOTE(review): gmod and scal are not defined in this branch -- this
        # would raise NameError if executed. TODO confirm the intended lookup.
        lablxdat = getattr(gmod.lablpara, name + 'totl')
        xdat = getattr(gdat, 'mean' + name)
        ydat = getattr(gdat, 'info' + namefull)
        tdpy.mcmc.plot_plot(path, xdat, ydat, lablxdat, lablydat, scal)
        ydat = getattr(gdat, 'pvks' + namefull)
        pathpvks = gdat.pathinfo + 'pvks' + namefull
        tdpy.mcmc.plot_plot(pathpvks, xdat, ydat, lablxdat, '$p_{KS}$', scal)
    else:
        # scalar variable: KL-divergence density and prior/posterior PDFs
        # horizontal axis
        xdat = getattr(gdat, 'mean' + name)
        # NOTE(review): gmod is not defined here either -- see the note in the
        # branch above. TODO confirm.
        lablxdat = getattr(gmod.lablpara, name + 'totl')
        # scaling
        scal = getattr(gdat, 'scal' + name)
        # common title
        titl = '$D_{KL} = %.3g$, KS = %.3g $\sigma$' % (info, pvks)
        # DKL density
        pathdinf = gdat.pathinfo + 'dinf' + namefull
        ydat = getattr(gdat, 'infodens' + namefull)
        lablydat = r'$\rho_{D_{KL}}$'
        tdpy.mcmc.plot_plot(pathdinf, xdat, ydat, lablxdat, lablydat, scal, titl=titl)
        # prior and posterior PDFs
        pathpdfn = gdat.pathinfo + 'pdfn' + namefull
        lablydat = r'$P$'
        ydat = [getattr(gdat, 'pdfnpost' + namefull), getattr(gdatprio, 'pdfnprio' + namefull)]
        legd = ['$P$(%s|$D$)' % lablxdat, '$P$(%s)' % lablxdat]
        tdpy.mcmc.plot_plot(pathpdfn, xdat, ydat, lablxdat, lablydat, scal, colr=['k', 'k'], linestyl=['-', '--'], legd=legd, titl=titl)
def plot_finl(gdat=None, gdatprio=None, rtag=None, strgpdfn='post', gdatmock=None, booltile=None):
    """Produce the postprocessing (final) plots for a finished run.

    Covers log-acceptance-term diagnostics, prior/posterior information
    plots, Gelman-Rubin and autocorrelation diagnostics, proposal
    efficiency, posterior frame plots, trans-dimensional and scalar
    parameter summaries, parameter covariances, stacked element histograms,
    prior/likelihood traces and resident-memory usage.

    Parameters
    ----------
    gdat : global data object of the run being postprocessed
    gdatprio : global data object with prior samples (loaded from disk if None
        and gdat.checprio is set)
    rtag : run tag used to locate the prior gdat on disk when needed
    strgpdfn : which sample list to plot ('post' by default)
    gdatmock : mock-run global data object forwarded to plot_samp
    booltile : if True, only the tiled subset of plots is produced

    NOTE(review): ``gmod`` is referenced throughout but never assigned in
    this function -- presumably it should be ``gdat.fitt``; those code paths
    raise NameError if executed. TODO confirm.
    """
    if gdat.typeverb > 0:
        print('plot_finl()')
        print('Producing postprocessing plots...')
    timetotlinit = gdat.functime()
    gdat.strgbest = 'ML'
    if not booltile:
        # terms in the log-acceptance probability
        listindxsamptotlproptotl = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlproptotl')
        listindxsamptotlpropaccp = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlpropaccp')
        listindxsamptotlpropreje = getattr(gdat, 'list' + strgpdfn + 'indxsamptotlpropreje')
        for n in gdat.indxproptype:
            pathbase = getattr(gdat, 'path' + strgpdfn + 'finl%s' % gdat.nameproptype[n])
            for k in gdat.indxtermlacp:
                varb = getattr(gdat, 'list' + strgpdfn + gdat.listnametermlacp[k])
                labl = gdat.listlabltermlacp[k]
                # trace plot of each acceptance term, split by proposal outcome
                if listindxsamptotlproptotl[n].size > 0 and (varb[listindxsamptotlproptotl[n]] != 0.).any():
                    path = pathbase + gdat.listnametermlacp[k] + 'totl'
                    tdpy.mcmc.plot_trac(path, varb[listindxsamptotlproptotl[n]], labl, titl=gdat.nameproptype[n] + ', Total')
                if listindxsamptotlpropaccp[n].size > 0 and (varb[listindxsamptotlpropaccp[n]] != 0.).any():
                    path = pathbase + gdat.listnametermlacp[k] + 'accp'
                    tdpy.mcmc.plot_trac(path, varb[listindxsamptotlpropaccp[n]], labl, titl=gdat.nameproptype[n] + ', Accepted')
                if listindxsamptotlpropreje[n].size > 0 and (varb[listindxsamptotlpropreje[n]] != 0.).any():
                    path = pathbase + gdat.listnametermlacp[k] + 'reje'
                    tdpy.mcmc.plot_trac(path, varb[listindxsamptotlpropreje[n]], labl, titl=gdat.nameproptype[n] + ', Rejected')
    if gdat.checprio and strgpdfn == 'post' and not booltile:
        # this works only for scalar variables -- needs to be generalized to all variables
        if gdatprio is None:
            # load the prior-run gdat from disk when it was not passed in
            pathoutprtag = retr_pathoutprtag(pathpcat, rtag)
            path = pathoutprtag + 'gdatfinlprio'
            gdatprio = readfile(path)
        # information-gain / KS plots for every scalar variable ...
        for namevarbscal in gmod.namepara.scal:
            plot_infopvks(gdat, gdatprio, namevarbscal, namevarbscal)
        # ... and for every (pair of) element feature(s) per population
        for l in gmod.indxpopl:
            for strgfeatfrst in gmod.namepara.genrelem[l]:
                if strgfeatfrst == 'spec' or strgfeatfrst == 'deflprof' or strgfeatfrst == 'specplot':
                    continue
                plot_infopvks(gdat, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + 'pop%d' % l)
                for strgfeatseco in gmod.namepara.genrelem[l]:
                    if strgfeatseco == 'spec' or strgfeatseco == 'deflprof' or strgfeatseco == 'specplot':
                        continue
                    if not checstrgfeat(strgfeatfrst, strgfeatseco):
                        continue
                    plot_infopvks(gdat, gdatprio, strgfeatfrst, 'hist' + strgfeatfrst + strgfeatseco + 'pop%d' % l, nameseco=strgfeatseco)
    # sample lists for the requested PDF
    listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')
    # NOTE(review): duplicate of the line above; harmless but redundant.
    listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')
    listparagenrscalbase = getattr(gdat, 'list' + strgpdfn + 'paragenrscalbase')
    listboolpropfilt = getattr(gdat, 'list' + strgpdfn + 'boolpropfilt')
    listmemoresi = getattr(gdat, 'list' + strgpdfn + 'memoresi')
    listindxproptype = getattr(gdat, 'list' + strgpdfn + 'indxproptype')
    listsampproc = getattr(gdat, 'list' + strgpdfn + 'sampproc')
    # Gelman-Rubin test
    pathdiag = getattr(gdat, 'path' + strgpdfn + 'finldiag')
    if gdat.numbproc > 1:
        if np.isfinite(gdat.gmrbstat).all():
            if gdat.typeverb > 0:
                print('Gelman-Rubin TS...')
            # histogram of the PSRF over data projections and base parameters
            figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
            minm = min(np.amin(gdat.gmrbstat), np.amin(gdat.gmrbparagenrscalbase))
            maxm = max(np.amax(gdat.gmrbstat), np.amax(gdat.gmrbparagenrscalbase))
            bins = np.linspace(minm, maxm, 40)
            axis.hist(gdat.gmrbstat.flatten(), bins=bins, label='Data proj.')
            axis.hist(gdat.gmrbparagenrscalbase, bins=bins, label='Fixed dim.')
            axis.set_xlabel('PSRF')
            axis.set_ylabel('$N_{stat}$')
            plt.tight_layout()
            figr.savefig(pathdiag + 'gmrbhist.pdf')
            plt.close(figr)
            # PSRF per base parameter
            figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
            axis.plot(gmod.indxparagenrbase, gdat.gmrbparagenrscalbase)
            axis.set_xticklabels(gmod.labltotlpara.genrbase)
            axis.set_ylabel('PSRF')
            plt.tight_layout()
            figr.savefig(pathdiag + 'gmrbparagenrscalbase.pdf')
            plt.close(figr)
            # PSRF maps per energy bin and event type
            for i in gdat.indxener:
                for m in gdat.indxevtt:
                    maps = gdat.gmrbstat[i, :, m]
                    path = pathdiag + 'gmrbdataen%02devt%d.pdf' % (i, m)
                    tdpy.plot_maps(path, maps, indxpixlrofi=gdat.indxpixlrofi, numbpixl=gdat.numbpixlfull, typepixl=gdat.typepixl, \
                                                minmlgal=gdat.anglfact*gdat.minmlgal, maxmlgal=gdat.anglfact*gdat.maxmlgal, \
                                                minmbgal=gdat.anglfact*gdat.minmbgal, maxmbgal=gdat.anglfact*gdat.maxmbgal)
        else:
            print('Inappropriate Gelman-Rubin test statistics encountered.')
    # plot autocorrelation
    if gdat.typeverb > 0:
        print('Autocorrelation...')
    tdpy.mcmc.plot_atcr(pathdiag, gdat.atcrcntp[0, 0, 0, 0, :], gdat.timeatcrcntp[0, 0, 0, 0], strgextn='cntp')
    tdpy.mcmc.plot_atcr(pathdiag, gdat.atcrpara[0, 0, :], gdat.timeatcrpara[0, 0], strgextn='para')
    print('Autocorrelation times:')
    for k, namepara in enumerate(gmod.namepara):
        print('%s %g' % (namepara, np.mean(gdat.timeatcrpara[:, k])))
    # plot proposal efficiency
    if gdat.typeverb > 0:
        print('Acceptance ratio...')
    numbtimemcmc = 20
    binstimemcmc = np.linspace(0., gdat.numbswep, numbtimemcmc)
    numbtick = 2
    sizefigrydat = 4. * gdat.numbproptype
    figr, axgr = plt.subplots(gdat.numbproptype, 1, figsize=(12., sizefigrydat), sharex='all')
    # plt.subplots returns a bare axis (not a list) for a single row
    if gdat.numbproptype == 1:
        axgr = [axgr]
    for n, axis in enumerate(axgr):
        # histograms of proposed and accepted sweeps for this proposal type
        histtotl = axis.hist(listindxsamptotlproptotl[n], bins=binstimemcmc)[0]
        histaccp = axis.hist(listindxsamptotlpropaccp[n], bins=binstimemcmc)[0]
        axis.set_ylabel('%s' % gdat.nameproptype[n])
        # NOTE(review): `k` here is a stale loop variable from the
        # autocorrelation loop above -- presumably `n` was intended so only
        # the bottom panel gets an x-label. TODO confirm.
        if k == gdat.numbproptype - 1:
            axis.set_xlabel('$i_{samp}$')
    plt.tight_layout()
    figr.savefig(pathdiag + 'accpratiproptype.pdf')
    plt.close(figr)
    if gdat.typeverb > 0:
        print('Proposal execution times...')
    ## time performance
    #listchro = np.empty((gdat.numbswep, gdat.numbchro))
    #listchro = []
    #for k, name in enumerate(gdat.listnamechro):
    #    #listchro[:, k] = getattr(gdat, 'list' + strgpdfn + 'chro' + name).flatten() * 1e3
    #    listchro.append(getattr(gdat, 'list' + strgpdfn + 'chro' + name).flatten() * 1e3)
    #pathdiag = getattr(gdat, 'path' + strgpdfn + 'finldiag')
    #figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))
    #axis.violin(listchro)
    #axis.set_yscale('log')
    #axis.set_ylabel('$t$ [ms]')
    #axis.set_xticklabels(gdat.listlablchro)
    #axis.axvline(mean(chro), ls='--', alpha=0.2, color='black')
    #figr.savefig(pathdiag + 'chro.pdf' % gdat.listnamechro[k])
    #plt.close(figr)
    # temp
    gdat.lablpmea = 'Mean'
    # posterior versions of the frame plots
    plot_samp(gdat, None, 'pdfn', 'fitt', 'finl', strgpdfn=strgpdfn, gdatmock=gdatmock, booltile=booltile)
    if booltile:
        return
    if gmod.numbparaelem > 0:
        if gdat.typeverb > 0:
            print('A mosaic of samples...')
        ## mosaic of images of posterior catalogs
        if gdat.numbpixl > 1:
            plot_mosa(gdat, strgpdfn)
    ## randomly selected trandimensional parameters
    if gmod.numbparaelem > 0:
        if gdat.typeverb > 0:
            print('Transdimensional parameters...')
        # choose the parameters based on persistence
        stdvlistsamptran = np.std(listparagenrscalfull[:, gmod.indxsamptrap], axis=0)
        indxtrapgood = np.where(stdvlistsamptran > 0.)[0]
        gmod.numbparaelemgood = indxtrapgood.size
        gmod.numbparaelemplot = min(3, gmod.numbparaelemgood)
        if gmod.numbparaelemplot > 0:
            indxtrapplot = np.sort(np.random.choice(gmod.indxsamptrap[indxtrapgood], size=gmod.numbparaelemplot, replace=False))
            path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')
            tdpy.mcmc.plot_grid(path, 'listelemfrst', listparagenrscalfull[:, gmod.indxsamptrap[:3]], [gmod.lablpara[k] for k in gmod.indxsamptrap[:3]])
            path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')
            tdpy.mcmc.plot_grid(path, 'listsamp', listparagenrscalfull[:, indxtrapplot], ['%d' % k for k in indxtrapplot])
            path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')
            tdpy.mcmc.plot_grid(path, 'listsamp', listparagenrscalfull[:, indxtrapplot], [gmod.lablpara[k] for k in indxtrapplot])
    if gdat.typeverb > 0:
        print('Scalar variables...')
    # scalar variables
    ## trace and marginal distribution of each parameter
    for name in gmod.namepara.scal:
        if gdat.typeverb > 0:
            print('Working on %s...' % name)
        scal = getattr(gdat, 'scal' + name)
        corr = getattr(gdat, 'corr' + name)
        if corr is None:
            truepara = None
        else:
            truepara = getattr(gdat, 'corr' + name)
        listvarb = getattr(gdat, 'list' + strgpdfn + name)
        # collapse single-column sample arrays to one dimension
        if listvarb.ndim != 1:
            if listvarb.shape[1] == 1:
                listvarb = listvarb[:, 0]
            else:
                raise Exception('')
        mlik = getattr(gdat, 'mlik' + name)
        # NOTE(review): `labltotl` used below is never assigned in this
        # function -- NameError if executed. TODO confirm intended lookup.
        path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscaltrac') + name
        tdpy.mcmc.plot_trac(path, listvarb, labltotl, truepara=truepara, scalpara=scal, listvarbdraw=[mlik], listlabldraw=[''], listcolrdraw=['r'])
        path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalhist') + name
        tdpy.mcmc.plot_hist(path, listvarb, labltotl, truepara=truepara, scalpara=scal, listvarbdraw=[mlik], listlabldraw=[''], listcolrdraw=['r'])
        for nameseco in gmod.namepara.scal:
            if name == nameseco:
                continue
            if gdat.typeverb > 0:
                print('Working on correlation of %s with %s...' % (name, nameseco))
            pathjoin = getattr(gdat, 'path' + strgpdfn + 'finlvarbscaljoin')
            # NOTE(review): corrseco, listvarbseco, labltotlseco, scalseco and
            # mlikseco are never assigned before use in this inner loop --
            # the second variable's summaries are presumably meant to be
            # fetched analogously to the outer loop. TODO confirm.
            if corrseco is None:
                trueparaseco = None
            else:
                trueparaseco = getattr(gdat, 'corr' + nameseco)
            if listvarbseco.ndim != 1:
                if listvarbseco.shape[1] == 1:
                    listvarbseco = listvarbseco[:, 0]
                else:
                    raise Exception('')
            # joint distribution of the parameter pair
            listjoin = np.vstack((listvarb, listvarbseco)).T
            tdpy.mcmc.plot_grid(pathjoin, name + nameseco, listjoin, [labltotl, labltotlseco], scalpara=[scal, scalseco], truepara=[truepara, trueparaseco], \
                                            join=True, listvarbdraw=[np.array([mlik, mlikseco])])
    if gdat.typeverb > 0:
        print('Fixed dimensional parameter covariance...')
    ### covariance
    ## overall
    path = getattr(gdat, 'path' + strgpdfn + 'finlvarbscalcova')
    truepara = gmod.corrparagenrscalbase
    mlikpara = gdat.mlikparagenrscalbase
    tdpy.mcmc.plot_grid(path, 'paragenrscalbase', listparagenrscalbase, gmod.labltotlpara.genrbasetotl, truepara=truepara, listvarbdraw=[mlikpara])
    # stacked posteiors binned in position and flux
    if gmod.numbparaelem > 0 and gdat.numbpixl > 1:
        liststrgbins = ['quad', 'full']
        for l in gmod.indxpopl:
            plot_histlgalbgalelemstkd(gdat, strgpdfn, l, 'cumu')
            for strgbins in liststrgbins:
                # NOTE(review): `namepara` here is the stale loop variable from
                # the autocorrelation loop above; likely gmod.namepara was
                # intended. TODO confirm.
                plot_histlgalbgalelemstkd(gdat, strgpdfn, l, strgbins, namepara.elemsign[l])
    if gdat.typeverb > 0:
        print('Prior and likelihood...')
    for strgpdfntemp in ['lpritotl', 'lliktotl']:
        if strgpdfntemp == 'lpritotl':
            labltemp = '\ln P(M)'
        if strgpdfntemp == 'lliktotl':
            labltemp = '\ln P(D|M)'
        labl = r'$%s$' % labltemp
        path = getattr(gdat, 'path' + strgpdfn + 'finl') + strgpdfntemp
        varb = getattr(gdat, 'list' + strgpdfn + strgpdfntemp)
        tdpy.mcmc.plot_hist(path, varb, labl)
        listvarbdraw = []
        listlabldraw = []
        listcolrdraw = []
        if gdat.typedata == 'mock':
            # overplot the true-model value for mock data
            listvarbdraw += [getattr(gdat.true, strgpdfntemp)]
            listlabldraw += ['True model']
            listcolrdraw += [gdat.refr.colr]
        tdpy.mcmc.plot_trac(path, getattr(gdat, 'list' + strgpdfn + strgpdfntemp), labl, \
                                            listvarbdraw=listvarbdraw, listlabldraw=listlabldraw, listcolrdraw=listcolrdraw)
    # plot resident memory
    figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))
    axis.plot(gdat.indxswep, np.mean(listmemoresi, 1) / float(2**30))
    axis.set_ylabel(r'$M$ [GB]')
    axis.set_xlabel(r'$i_{samp}$')
    plt.tight_layout()
    figr.savefig(pathdiag + 'memoresi.pdf')
    plt.close(figr)
    timetotlfinl = gdat.functime()
    if gdat.typeverb > 0:
        print('Plots and animations are produced in %.3g seconds.' % (timetotlfinl - timetotlinit))
def plot_sbrt(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, specconvunit):
    """Plot the spatially averaged surface-brightness spectra of the model components.

    For each spatial-mean region, overlays the data spectrum, every
    background and emission component, optionally individual element
    spectra, and the total model, in the energy-scaling convention given by
    ``specconvunit``.

    Parameters
    ----------
    gdat : global data object
    gdatmodi : per-process model iteration object (None for summary states)
    strgstat : state to plot ('this', 'mlik' or 'pdfn')
    strgmodl : model to plot ('fitt' or 'true')
    strgpdfn : sample-PDF label used to locate summary statistics
    specconvunit : tuple encoding the spectral unit/scaling convention;
        element 0 selects the energy weighting ('en00'..'en03')
    """
    gmod = getattr(gdat, strgmodl)
    gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
    gmodstat = getattr(gdatobjt, strgstat)
    for b, namespatmean in enumerate(gdat.listnamespatmean):
        figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))

        # plot reference spectra
        if gdat.listprefsbrtlabltotl is not None:
            for k in range(len(gdat.listprefsbrtlabltotl)):
                if gdat.listprefsbrttype[k] == 'shad':
                    # shaded band: central curve plus lower/upper envelope
                    factenerrefr = [[] for a in range(3)]
                    for a in range(3):
                        factenerrefr[a] = retr_factener(specconvunit[0], gdat.listprefsbrtener[k][a])
                    axis.plot(gdat.listprefsbrtener[k][0], gdat.listprefsbrtsbrt[k][0] * factenerrefr[0], color='m', label=gdat.listprefsbrtlabltotl[k])
                    # build a closed polygon out of the lower and (reversed)
                    # upper envelopes for axis.fill
                    enerpoly = np.empty(gdat.listprefsbrtener[k][1].size + gdat.listprefsbrtener[k][2].size)
                    enerpoly[:gdat.listprefsbrtener[k][1].size] = gdat.listprefsbrtener[k][1]
                    enerpoly[gdat.listprefsbrtener[k][1].size:] = gdat.listprefsbrtener[k][2][::-1]
                    sbrtpoly = np.empty(gdat.listprefsbrtener[k][1].size + gdat.listprefsbrtener[k][2].size)
                    sbrtpoly[:gdat.listprefsbrtener[k][1].size] = gdat.listprefsbrtsbrt[k][1] * factenerrefr[1]
                    sbrtpoly[gdat.listprefsbrtener[k][1].size:] = gdat.listprefsbrtsbrt[k][2][::-1] * factenerrefr[2][::-1]
                    axis.fill(enerpoly, sbrtpoly, color='m', alpha=0.5)
                else:
                    factenerrefr = retr_factener(specconvunit[0], gdat.listprefsbrtener[k][1])
                    axis.errorbar(gdat.listprefsbrtener[k][1], gdat.listprefsbrtsbrt[k][1] * factenerrefr, label=gdat.listprefsbrtlabltotl[k], color='m')

        # decide which models to overplot (the fit, plus the truth for mock data)
        if strgmodl == 'true':
            liststrgmodl = [strgmodl]
            listgdatobjt = [gdat]
        if strgmodl == 'fitt' and (strgstat == 'this' or strgstat == 'pdfn'):
            if gdat.typedata == 'mock':
                liststrgmodl = [strgmodl, 'true']
                listgdatobjt = [gdatobjt, gdat]
            else:
                liststrgmodl = [strgmodl]
                listgdatobjt = [gdatobjt]
        numbstrgstattemp = len(liststrgmodl)
        for a in range(numbstrgstattemp):
            indxploteleminit = []
            indxplotelemendd = []

            # number of transdimensional elements to be overplotted
            numbelemtemp = 0
            if gdat.numbpixl == 1 and strgstat != 'pdfn':
                if liststrgmodl[a] == 'fitt':
                    numbelem = [[] for l in gmod.indxpopl]
                    for l in gmod.indxpopl:
                        gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
                        numbelemtemp += np.sum(gmodstat.numbelem[l])
                else:
                    for q in gdat.indxrefr:
                        numbelemtemp += np.sum(gdat.refr.numbelem[q])
            # NOTE(review): numblablsbrtspec (and numblablsbrt, listlablsbrtspec
            # used further below) are not defined in this function -- they are
            # presumably module-level state set up elsewhere, otherwise this is
            # a latent NameError. TODO confirm.
            numbplot = numblablsbrtspec + numbelemtemp
            listydat = np.zeros((numbplot, gdat.numbener))
            listyerr = np.zeros((2, numbplot, gdat.numbener))
            cntr = 0
            cntrdata = cntr

            ## data
            listydat[cntr, :] = gdat.sbrtdatamean[b]
            listyerr[:, cntr, :] = gdat.sbrtdatastdv[b]
            cntr += 1

            # background components
            for c in gmod.indxback:
                listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtback%04dmea%d' % (c, b), strgpdfn)
                if strgstat == 'pdfn':
                    listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtback%04dmea%d' % (c, b), strgpdfn, strgmome='errr')
                cntr += 1

            # element (delta-function) emission and its subtracted version
            if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy and not (liststrgmodl[a] == 'true' and gdat.refr.numbelemtotl == 0):
                listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncmea%d' % (b), strgpdfn)
                if strgstat == 'pdfn':
                    listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncmea%d' % (b), strgpdfn, strgmome='errr')
                cntr += 1
                listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncsubtmea%d' % (b), strgpdfn)
                if strgstat == 'pdfn':
                    listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtdfncsubtmea%d' % (b), strgpdfn, strgmome='errr')
                cntr += 1

            # host-galaxy emission components
            if gmod.typeemishost != 'none':
                for e in gmod.indxsersfgrd:
                    listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrthostisf%dmea%d' % (e, b), strgpdfn)
                    if strgstat == 'pdfn':
                        listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], \
                                                                'sbrthostisf%dmea%d' % (e, b), strgpdfn, strgmome='errr')
                    cntr += 1

            # lensed emission
            if gmod.boollens:
                listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtlensmea%d' % (b), strgpdfn)
                if strgstat == 'pdfn':
                    listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtlensmea%d' % (b), strgpdfn, strgmome='errr')
                cntr += 1

            # individual element spectra (only for single-pixel, non-pdfn states)
            if gdat.numbpixl == 1 and strgstat != 'pdfn':
                cntrline = cntr
                indxploteleminit.append(cntr)
                for l in gmod.indxpopl:
                    if liststrgmodl[a] == 'true':
                        for k in range(gmod.numbelem[l]):
                            listydat[cntr, :] = getattr(listgdatobjt[a], liststrgmodl[a] + 'spec')[l][0, :, k]
                            # label only the first element spectrum ('Lines');
                            # subsequent ones get a None (hidden) legend entry
                            if cntr == cntrline:
                                listlablsbrtspec = listlablsbrtspec[:cntr] + ['Lines'] + listlablsbrtspec[cntr:]
                            else:
                                listlablsbrtspec = listlablsbrtspec[:cntr] + [None] + listlablsbrtspec[cntr:]
                            cntr += 1
                            if k == gmod.numbelem[l] - 1:
                                indxplotelemendd.append(k)
                    else:
                        for k in range(gmodstat.numbelem[l]):
                            listydat[cntr, :] = getattr(listgdatobjt[a], strgstat + 'spec')[l][:, k]
                            if cntr == cntrline:
                                listlablsbrtspec = listlablsbrtspec[:cntr] + ['Lines'] + listlablsbrtspec[cntr:]
                            else:
                                listlablsbrtspec = listlablsbrtspec[:cntr] + [None] + listlablsbrtspec[cntr:]
                            cntr += 1
                            if k == gmodstat.numbelem[l] - 1:
                                indxplotelemendd.append(k)
            ## total model
            if numblablsbrt > 1:
                listydat[cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtmodlmea%d' % (b), strgpdfn)
                if strgstat == 'pdfn':
                    listyerr[:, cntr, :] = retr_fromgdat(gdat, gdatmodi, strgstat, liststrgmodl[a], 'sbrtmodlmea%d' % (b), strgpdfn, strgmome='errr')
                cntr += 1
            # the true model is drawn without error bars
            if liststrgmodl[a] == 'true':
                listyerr = np.zeros((2, numbplot, gdat.numbener))

            # plot energy spectra of the data, background model components and total background
            if gdat.numbener > 1:
                listmrkr = ['o', '>', 's', 'h', '*', 'p', 'x']
                # pad the marker list so any number of components is covered
                for k in range(100):
                    listmrkr.append('x')

                # determine the energy scaling factor
                if specconvunit[0] == 'en00':
                    factener = 1.
                if specconvunit[0] == 'en01':
                    factener = gdat.meanpara.ener
                if specconvunit[0] == 'en02':
                    factener = gdat.meanpara.ener**2
                if specconvunit[0] == 'en03':
                    # temp
                    pass
                    factener = 1.
                    #indxenerintv = np.where((gdat.meanpara.ener < specconvunit[4]) & (gdat.meanpara.ener > specconvunit[3]))[0]
                    #ener = np.concatenate((np.array([specconvunit[3]]), gdat.meanpara.ener[indxenerintv], np.array([specconvunit[4]])))
                    #
                    #for k in range(3):
                    #    if k == 0:
                    #        ydattemp = 
                    #        ydatminmener = np.interp(specconvunit[3], gdat.meanpara.ener, ydat)
                    #        ydatmaxmener = np.interp(specconvunit[4], gdat.meanpara.ener, ydat)
                    #        ydat = np.concatenate((np.array([ydatminmener]), ydat[indxenerintv], np.array([ydatmaxmener])))
                    #        ydat = np.trapz(ydat, gdat.meanpara.ener)
                    #
                    #yerrminmener = np.interp(specconvunit[3], gdat.meanpara.ener, yerr, axis=1)
                    #yerrmaxmener = np.interp(specconvunit[4], gdat.meanpara.ener, yerr, axis=1)
                    #ydat = np.stack((np.array([yerrminmener]), ydat[indxenerintv], np.array([yerrmaxmener])))
                    #
                    #
                    #yerr = np.trapz(yerr, gdat.meanpara.ener)
                xdat = gdat.meanpara.ener
                cntr = 0
                for k in range(listydat.shape[0]):
                    mrkr = listmrkr[cntr]
                    if k == cntrdata:
                        # data points are drawn opaque, black and solid
                        colr = 'black'
                        alph = 1.
                        linestyl = '-'
                    else:
                        colr = retr_colr(gdat, strgstat, liststrgmodl[a], indxpopl=None)
                        linestyl = '--'
                        alph = 0.5
                    ydat = np.copy(listydat[k, :])
                    yerr = np.copy(listyerr[:, k, :])
                    ydat *= factener
                    yerr *= factener
                    # avoid replotting the data when overlaying the true model
                    if k == cntrdata and a > 0:
                        continue
                    if liststrgmodl[a] == 'fitt':
                        labl = listlablsbrtspec[k]
                    else:
                        labl = None
                    temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, color=colr, marker=mrkr, ls=linestyl, markersize=10, alpha=alph, label=labl)
                    for caps in listcaps:
                        caps.set_markeredgewidth(1)
                    # reuse the same marker across all element-line spectra
                    if gdat.numbpixl == 1 and strgstat != 'pdfn':
                        if cntr != cntrline or k in indxplotelemendd:
                            cntr += 1
                    else:
                        cntr += 1
        # axis decoration and output (one figure per spatial-mean region)
        if gdat.numbener > 1:
            axis.set_xlim([np.amin(gdat.binspara.ener), np.amax(gdat.binspara.ener)])
            # experiment-dependent y-limit scale factors
            if gdat.typeexpr == 'chan':
                factminm = 1e-1
                factmaxm = 1e2
            elif gdat.typeexpr == 'ferm':
                factminm = 1e1
                factmaxm = 1e-1
            else:
                factminm = 1e-4
                factmaxm = 1e0
            minmydat = factminm * gdat.factylimtbrt[0] * np.amax(listydat[cntrdata, :] * factener)
            maxmydat = factmaxm * gdat.factylimtbrt[1] * np.amax(listydat[cntrdata, :] * factener)
            limtydat = [minmydat, maxmydat]
            axis.set_ylim(limtydat)
            axis.set_yscale('log')
            axis.set_xlabel(gdat.lablenertotl)
            axis.set_xscale('log')
        labl = getattr(gmod.lablpara, 'sbrt' + specconvunit[0] + specconvunit[1] + 'stertotl')
        axis.set_ylabel(labl)
        make_legd(axis, numbcols=2)
        plt.tight_layout()
        path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'sdenmean%s%s%s' % (namespatmean, specconvunit[0], specconvunit[1]))
        figr.savefig(path)
        plt.close(figr)
def retr_factener(strgconvunit, ener):
    """Return the energy weighting factor for a spectral-unit convention.

    Parameters
    ----------
    strgconvunit : str
        Energy convention: 'en00' (unweighted), 'en01' (weight by E),
        'en02' (weight by E^2), 'en03' (placeholder, currently unweighted).
    ener : numpy.ndarray
        Energy grid the factor is evaluated on.

    Returns
    -------
    numpy.ndarray
        Per-energy multiplicative factor, same shape as ``ener``.

    Raises
    ------
    ValueError
        For an unrecognized convention string (previously an implicit
        NameError from an unbound local).
    """
    if strgconvunit == 'en00':
        factener = np.ones_like(ener)
    elif strgconvunit == 'en01':
        factener = ener
    elif strgconvunit == 'en02':
        factener = ener**2
    elif strgconvunit == 'en03':
        # temp -- integrated-flux weighting not implemented yet; fall back to unity
        factener = np.ones_like(ener)
    else:
        raise ValueError('Unrecognized energy convention: %s' % strgconvunit)
    return factener
def plot_pdfntotlflux():
    """Plot the PDF of the total flux of two superimposed power-law sources.

    For a grid of power-law slopes, samples two independent power-law
    distributed fluxes via inverse-CDF sampling, histograms their sum, and
    compares it against the analytic single-source PDF, its numerical
    self-convolution, and a polynomial fit to that convolution. One figure
    per slope is written under ``$TDGU_DATA_PATH/imag/powrpdfn/``.
    """
    minm = 1e-9
    maxm = 10e-9
    numbvarb = 90
    numbparagenrfull = 100000
    alph = 0.5
    # bins for a single flux variable
    binssing = np.linspace(minm, maxm, numbvarb + 1)
    meansing = (binssing[:-1] + binssing[1:]) / 2.
    deltsing = binssing[1:] - binssing[:-1]
    # bins for the sum of two flux variables
    binsdoub = np.linspace(2. * minm, 2. * maxm, 2 * numbvarb)
    meandoub = (binsdoub[:-1] + binsdoub[1:]) / 2.
    deltdoub = binsdoub[1:] - binsdoub[:-1]
    bins = np.linspace(minm, 2. * maxm, 2 * numbvarb + 1)
    arry = np.empty((2, numbparagenrfull))
    minmslop = 1.5
    maxmslop = 3.
    numbslop = 4
    sloparry = np.linspace(minmslop, maxmslop, numbslop)
    for n in range(numbslop):
        slop = sloparry[n]
        # draw two independent power-law distributed samples (inverse CDF)
        for k in range(2):
            arry[k, :] = (np.random.rand(numbparagenrfull) * (maxm**(1. - slop) - minm**(1. - slop)) + minm**(1. - slop))**(1. / (1. - slop))
        totl = np.sum(arry, 0)
        # analytic power-law PDF and its numerical self-convolution
        powrprob = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop)) * meansing**(-slop)
        # was bare convolve/polyfit/polyval, a NameError outside pylab-style
        # star imports; qualify with the numpy namespace
        convprob = np.convolve(powrprob, powrprob) * deltdoub[0]
        indxdoub = np.where(meandoub <= maxm)[0]
        convprobpoly = np.polyval(np.polyfit(meandoub[indxdoub], convprob[indxdoub], 8), meandoub[indxdoub])
        figr, axis = plt.subplots()
        axis.hist(arry[k, :], bins=bins, alpha=alph, label='$f_1$ (Sampled)', color='b')
        axis.hist(totl, bins=bins, alpha=alph, label='$f_0$ (Sampled)', color='g')
        axis.plot(meansing, powrprob * numbparagenrfull * deltsing, label='$f_1$ (Analytic)', color='b')
        axis.plot(meandoub, convprob * numbparagenrfull * deltdoub[0], label='$f_0$ (Numerically convolved)', color='g')
        axis.plot(meandoub[indxdoub], convprobpoly * numbparagenrfull * deltdoub[indxdoub], label='$f_0$ (Fit)', color='r')
        # was `numbsamp`, which is undefined in this scope; the histogram
        # counts are bounded by the number of generated samples
        axis.set_ylim([0.5, numbparagenrfull])
        axis.set_xlabel('$f$')
        axis.set_xlim([np.amin(bins), np.amax(bins)])
        axis.set_xscale('log')
        axis.set_yscale('log')
        axis.set_ylabel('$N_{samp}$')
        make_legd(axis)
        plt.tight_layout()
        pathfold = os.environ["TDGU_DATA_PATH"] + '/imag/powrpdfn/'
        figr.savefig(pathfold + 'powrpdfn%04d.pdf' % n)
        plt.close(figr)
def savefigr(gdat, gdatmodi, figr, path):
    """Save the currently active matplotlib figure to ``path``.

    The ``gdat``, ``gdatmodi`` and ``figr`` arguments are unused at present;
    they remain in the signature for call-site compatibility and for the
    disabled multiprocess locking sketched below. Note this saves pyplot's
    *current* figure, not necessarily ``figr``.
    """
    # Multiprocess file locking was disabled; kept here for reference:
    #   if gdatmodi is not None and gdat.numbproc > 1:
    #       gdatmodi.lock.acquire()   # 'Process %d acquiring the lock...'
    plt.savefig(path)
    #   if gdatmodi is not None and gdat.numbproc > 1:
    #       gdatmodi.lock.release()   # 'Process %d releasing the lock...'
def plot_elemtdim(gdat, gdatmodi, strgstat, strgmodl, strgelemtdimtype, strgelemtdimvarb, indxpoplfrst, strgfrst, \
                                        strgseco, strgtotl, strgmome='pmea', strgpdfn='post'):
    """Plot the joint (two-dimensional) distribution of a pair of element features.

    Depending on ``strgelemtdimtype``, either a binned 2D histogram ('bind')
    is shown as a color mesh, or individual elements (condensed catalog for
    'pdfn', current state for 'this'/'mlik') are scattered. Reference-catalog
    elements are overlaid when the corresponding features exist on gdat.refr.

    Parameters
    ----------
    gdat : global data object
    gdatmodi : per-process model iteration object (used for 'this'/'mlik')
    strgstat : state to plot ('pdfn', 'this' or 'mlik')
    strgmodl : model name ('fitt' or 'true')
    strgelemtdimtype : 'bind' for binned histograms, otherwise scatter
    strgelemtdimvarb : subdirectory label for the output path
    indxpoplfrst : population index of the plotted elements
    strgfrst, strgseco : names of the features on the x and y axes
    strgtotl : full name of the 2D summary statistic on gdat
    strgmome : posterior moment to plot when strgstat == 'pdfn'
    strgpdfn : sample-PDF label
    """
    gmod = getattr(gdat, strgmodl)
    sizelarg = 10
    sizesmll = 1
    if strgstat == 'pdfn':
        lablmome = getattr(gdat, 'labl' + strgmome)
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    if strgmodl == 'fitt':
        colrtemp = gmod.colrelem[indxpoplfrst]
    if strgstat == 'pdfn':
        labl = gdat.lablsampdist + ' ' + lablmome
        if strgelemtdimtype == 'bind':
            varb = getattr(gdat, strgmome + strgpdfn + strgtotl)
            # NOTE(review): this reads the literal attribute 'strgfrst' rather
            # than the feature named by the strgfrst argument (contrast the
            # getattr on the next line) -- likely a bug. TODO confirm.
            varbfrst = gdat.binspara.strgfrst
            varbseco = getattr(gdat.binspara, strgseco)
            # histogram-like quantities get a linear color scale, others log
            if strgtotl.startswith('hist') or strgtotl.startswith('exr') or strgtotl.startswith('incr') or np.amax(varb) <= 0.:
                normtdim = None
            else:
                normtdim = mpl.colors.LogNorm(0.5, vmax=np.amax(varb))
            imag = axis.pcolor(varbfrst, varbseco, varb.T, cmap='Blues', label=labl, norm=normtdim)
            make_cbar(gdat, axis, imag)
        else:
            if gdat.boolcondcatl:
                # scatter the high-prevalence members of the condensed catalog
                varbfrst = np.zeros(gdat.numbprvlhigh)
                varbseco = np.zeros(gdat.numbprvlhigh)
                cntr = 0
                for r in gdat.indxstkscond:
                    if r in gdat.indxprvlhigh:
                        varbfrst[cntr] = gdat.dictglob['poststkscond'][r][strgfrst][indxpoplfrst]
                        varbseco[cntr] = gdat.dictglob['poststkscond'][r][strgseco][indxpoplfrst]
                        cntr += 1
                axis.scatter(varbfrst, varbseco, alpha=gdat.alphelem, color=colrtemp, label=gdat.lablparagenrscalfull)
    if strgstat == 'this' or strgstat == 'mlik':
        if strgelemtdimtype == 'bind':
            meanfrst = getattr(gdat.binspara, strgfrst)
            meanseco = getattr(gdat.binspara, strgseco)
            hist = getattr(gdatmodi, strgstat + strgtotl)
            if strgtotl.startswith('hist') or strgtotl.startswith('exr') or strgtotl.startswith('incr') or np.amax(hist) <= 0.:
                normtdim = None
            else:
                normtdim = mpl.colors.LogNorm(0.5, vmax=np.amax(hist))
            imag = axis.pcolor(meanfrst, meanseco, hist.T, cmap='Blues', label=gdat.lablparagenrscalfull, alpha=gdat.alphhist, norm=normtdim)
        else:
            varbfrst = getattr(gdatmodi.this, strgfrst)[indxpoplfrst]
            varbseco = getattr(gdatmodi.this, strgseco)[indxpoplfrst]
            # fall back to a dummy near-origin point for empty populations
            # NOTE(review): limtfrst/limtseco are not defined in this function;
            # this fallback raises NameError if hit. TODO confirm.
            if len(varbfrst) == 0 or len(varbseco) == 0:
                varbfrst = np.array([limtfrst[0] * 0.1])
                varbseco = np.array([limtseco[0] * 0.1])
            axis.scatter(varbfrst, varbseco, alpha=gdat.alphelem, color=colrtemp, label=gdat.lablparagenrscalfull)

    # reference elements
    # strip a trailing reference-catalog suffix to recover the feature name
    if strgfrst[-4:] in gdat.listnamerefr:
        strgfrsttemp = strgfrst[-4:]
    else:
        strgfrsttemp = strgfrst
    if strgseco[-4:] in gdat.listnamerefr:
        strgsecotemp = strgseco[-4:]
    else:
        strgsecotemp = strgseco
    if hasattr(gdat.refr, strgfrsttemp) and hasattr(gdat.refr, strgsecotemp):
        for q in gdat.indxrefr:
            if strgfrsttemp in gdat.refr.namepara.elem[q] and strgsecotemp in gdat.refr.namepara.elem[q]:
                refrvarbfrst = getattr(gdat.refr, strgfrsttemp)[q]
                refrvarbseco = getattr(gdat.refr, strgsecotemp)[q]
                # same empty-population fallback as above (same NameError risk)
                if len(refrvarbfrst) == 0 or len(refrvarbseco) == 0:
                    refrvarbfrst = np.array([limtfrst[0] * 0.1])
                    refrvarbseco = np.array([limtseco[0] * 0.1])
                axis.scatter(refrvarbfrst, refrvarbseco, alpha=gdat.alphelem, color=gdat.refr.colrelem[q], label=gdat.refr.lablelem[q], s=sizelarg)

    # significance contours, axis scales/labels/limits and output
    plot_sigmcont(gdat, strgmodl, axis, strgfrst, indxpoplfrst, strgseco=strgseco)
    scalfrst = getattr(gmod.scalpara, strgfrst)
    scalseco = getattr(gmod.scalpara, strgseco)
    if scalfrst == 'logt':
        axis.set_xscale('log')
    if scalseco == 'logt':
        axis.set_yscale('log')
    axis.set_xlabel(getattr(gmod.labltotlpara, strgfrst))
    axis.set_ylabel(getattr(gmod.labltotlpara, strgseco))
    axis.set_xlim(getattr(gmod.limtpara, strgfrst))
    axis.set_ylim(getattr(gmod.limtpara, strgseco))
    make_legd(axis)
    plt.tight_layout()
    if strgstat == 'pdfn':
        strgmometemp = strgmome
    else:
        strgmometemp = ''
    nameinte = strgelemtdimvarb + 'tdim/'
    path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, '%s%s' % (strgmometemp, strgtotl), nameinte=nameinte)
    savefigr(gdat, gdatmodi, figr, path)
    plt.close(figr)
def plot_sigmcont(gdat, strgmodl, axis, strgfrst, indxpoplfrst, strgseco=None):
    """Overlay significance thresholds for the delta-log-likelihood feature.

    Whenever the feature on either axis is 'deltllik', draws a dashed
    vertical/horizontal line at the chi-squared quantile corresponding to
    each p-value in ``gdat.pvalcont``, with degrees of freedom equal to the
    per-element parameter count of the selected population.

    Parameters
    ----------
    gdat : global data object (provides pvalcont and the model objects)
    strgmodl : name of the model attribute on gdat ('fitt' or 'true')
    axis : matplotlib axis to draw on
    strgfrst : name of the feature on the horizontal axis
    indxpoplfrst : population index selecting the element dimensionality
    strgseco : name of the feature on the vertical axis, or None

    NOTE(review): one call site passes the model object itself with an empty
    strgmodl; that call was already broken (bare-name NameError before this
    fix) and still needs attention at the caller. TODO confirm.
    """
    if strgfrst == 'deltllik' or strgseco == 'deltllik':
        # was a bare `gmod` (unbound name); resolve the model from gdat
        gmod = getattr(gdat, strgmodl)
        for pval in gdat.pvalcont:
            if strgfrst == 'deltllik':
                deltlliksigm = scipy.stats.chi2.ppf(1. - pval, gmod.numbparagenrelemsing[indxpoplfrst])
                axis.axvline(deltlliksigm, ls='--', color='black', alpha=0.2)
            if strgseco == 'deltllik':
                deltlliksigm = scipy.stats.chi2.ppf(1. - pval, gmod.numbparagenrelemsing[indxpoplfrst])
                axis.axhline(deltlliksigm, ls='--', color='black', alpha=0.2)
def plot_gene(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgydat, strgxdat, typehist='hist', \
              indxrefrplot=None, indxydat=None, strgindxydat=None, indxxdat=None, strgindxxdat=None, plottype='none', \
              meanxdat=None, \
              scal=None, scalxdat=None, scalydat=None, limtxdat=None, limtydat=None, omittrue=False, nameinte='', \
              lablxdat='', lablydat='', histodim=False, offslegd=None, booltdim=False, ydattype='totl', boolhistprio=True):
    '''
    General-purpose 1D plotter: draws the quantity named by strgydat against
    strgxdat (line, errorbar, scatter or histogram bar depending on flags),
    overlays reference/true curves and prior hyper-distributions, then saves
    the figure via retr_plotpath/savefigr and closes it.
    '''
    gmod = getattr(gdat, strgmodl)
    gmodstat = getattr(gmod, strgstat)
    # element-wise quantities are tagged '...popN...' near the end of the name
    if strgydat[-8:-5] == 'pop':
        boolelem = True
    else:
        boolelem = False
    # resolve per-axis scales; `scal` overrides both when given
    if scal is None:
        if scalxdat is None:
            scalxdat = 'linr'
        if scalydat is None:
            scalydat = 'linr'
    else:
        scalxdat = scal
        scalydat = scal
    if histodim:
        figrsize = (gdat.plotsize, 0.8 * gdat.plotsize)
    else:
        figrsize = (gdat.plotsize, gdat.plotsize)
    # NOTE(review): figrsize is computed above but not passed to plt.subplots —
    # likely intended figsize=figrsize; confirm before changing.
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    # fetch the data to plot
    if booltdim:
        xdat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgxdat, strgpdfn)
        ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn)
    else:
        # strgxdat is of the form 'mean<name>'; strip the 4-char prefix
        xdat = getattr(gdat.meanpara, strgxdat[4:])
        if typehist == 'histcorrreca':
            ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], strgpdfn)
        else:
            ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn)
    if indxxdat is not None:
        xdat = xdat[indxxdat]
    if indxydat is not None:
        ydat = ydat[indxydat]
    xerr = np.zeros((2, xdat.size))
    if booltdim:
        # NOTE(review): `colr` is not yet defined on this path — confirm callers
        # never hit booltdim=True or this raises NameError.
        axis.scatter(xdat, ydat, alpha=gdat.alphelem, color=colr, label=gdat.lablparagenrscalfull)
    else:
        if histodim:
            # temp
            # NOTE(review): both branches are identical — the condition is a no-op.
            if strgxdat[4:] in gmod.namepara.elem:
                deltxdat = getattr(gdat.deltpara, strgxdat[4:])
                binsxdat = getattr(gdat.binspara, strgxdat[4:])
            else:
                deltxdat = getattr(gdat.deltpara, strgxdat[4:])
                binsxdat = getattr(gdat.binspara, strgxdat[4:])
            # bar centers: lower bin edges shifted by half a bin width
            xdattemp = binsxdat[:-1] + deltxdat / 2.
        if strgmodl == 'fitt':
            # pick label/color for the fitted model
            if boolelem:
                if strgydat.startswith('cmpl'):
                    labl = gmod.lablelem[int(strgydat[-5])]
                    colr = gmod.colrelem[int(strgydat[-5])]
                else:
                    labl = gmod.lablelem[int(strgydat[-1])]
                    colr = gmod.colrelem[int(strgydat[-1])]
            else:
                labl = gmod.labl
                colr = gmod.colr
            if strgstat == 'pdfn':
                # posterior: plot medians with asymmetric error bars
                if typehist == 'histcorrreca':
                    yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], strgpdfn, strgmome='errr')
                else:
                    yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn, strgmome='errr')
                if indxydat is not None:
                    yerr = yerr[[slice(None)] + indxydat]
                # label
                if strgydat.startswith('hist'):
                    ## element distribution
                    labl = gdat.lablsampdist
                else:
                    ## other
                    labl = gdat.lablsampdist
                # draw points
                indxerrr = np.where((yerr[0, :] > 0.) | (yerr[1, :] > 0.))[0]
                # NOTE(review): labltemp is computed but unused; errorbar below
                # uses label=labl unconditionally.
                if indxerrr.size > 0:
                    labltemp = None
                else:
                    labltemp = labl
                temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, xerr=xerr, label=labl, \
                                                     marker='o', ls='', markersize=5, color=colr, lw=1, capsize=5)
                # draw error-bar caps
                if indxerrr.size > 0:
                    temp, listcaps, temp = axis.errorbar(xdat[indxerrr], ydat[indxerrr], yerr=yerr[:, indxerrr], xerr=xerr[:, indxerrr], \
                                                         marker='o', ls='', markersize=5, color=colr, lw=1, capsize=5)
                    for caps in listcaps:
                        caps.set_markeredgewidth(1)
            elif strgstat == 'this' or strgstat == 'mlik':
                # single state (current sample or maximum likelihood)
                if strgstat == 'this':
                    labl = gdat.lablsamp
                else:
                    labl = gdat.lablmlik
                if histodim:
                    axis.bar(xdattemp, ydat, deltxdat, label=gdat.lablparagenrscalfull, alpha=0.5, linewidth=1, edgecolor=colr)
                else:
                    if plottype == 'errr':
                        yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgydat, strgpdfn, strgmome='errr')
                        if indxydat is not None:
                            yerr = yerr[[slice(None)] + indxydat]
                        temp, listcaps, temp = axis.errorbar(xdat, ydat, yerr=yerr, xerr=xerr, \
                                                             marker='o', ls='', markersize=5, label=labl, lw=1, capsize=5, color=colr)
                        for caps in listcaps:
                            caps.set_markeredgewidth(1)
                    else:
                        axis.plot(xdat, ydat, label=gdat.lablparagenrscalfull, alpha=0.5, color=colr)
        # reference histogram
        if not omittrue:
            for q in gdat.indxrefr:
                # build the gdat attribute name holding the reference curve
                if boolelem:
                    if strgydat[-12:-8] in gdat.listnamerefr:
                        name = 'refr' + strgydat[:-12] + 'pop%d' % q + strgydat[-4:]
                    else:
                        name = 'refr' + strgydat[:-8] + 'pop%d' % q + strgydat[-4:]
                else:
                    name = 'refr' + strgydat
                if not hasattr(gdat, name):
                    continue
                ydattemp = getattr(gdat, name)
                ydat = ydattemp
                if indxydat is not None:
                    ydat = ydat[indxydat]
                if strgydat[-8:-5] == 'pop':
                    labl = gdat.refr.lablelem[q]
                    colr = gdat.refr.colrelem[q]
                else:
                    labl = gdat.refr.labl
                    colr = gdat.refr.colr
                if histodim:
                    axis.bar(xdattemp, ydat, deltxdat, color=colr, label=labl, alpha=gdat.alphhist, linewidth=1, edgecolor=colr)
                else:
                    axis.plot(xdat, ydat, color=colr, label=labl, alpha=gdat.alphline)
                # best-effort overlay of the completeness-scaled curve
                try:
                    if histodim:
                        if typehist == 'histcorrreca':
                            reca = getattr(gdat.true, 'reca' + strgydat[4:])
                        axis.plot(xdattemp, 10. * reca, color='purple', label='PTFN', alpha=gdat.alphline)
                except:
                    pass
                # non-element quantities have a single reference; stop after one
                if not boolelem:
                    break
        # external reference histogram
        if histodim and strgydat == 'histfluxpop0':
            # best-effort: the listprefhistflux* attributes may be absent
            try:
                if gdat.listprefhistfluxlabl is not None:
                    for k in range(len(gdat.listprefhistfluxlabl)):
                        if gdat.listprefhistfluxtype[k] == 'shad':
                            axis.plot(gdat.listprefhistfluxflux[k][0], gdat.listprefhistfluxhist[k][0], color='m', label=gdat.listprefhistfluxlabl[k])
                            # build a closed polygon from the lower and (reversed) upper envelopes
                            enerpoly = np.empty(gdat.listprefhistfluxflux[k][1].size + gdat.listprefhistfluxflux[k][2].size)
                            enerpoly[:gdat.listprefhistfluxflux[k][1].size] = gdat.listprefhistfluxflux[k][1]
                            enerpoly[gdat.listprefhistfluxflux[k][1].size:] = gdat.listprefhistfluxflux[k][2][::-1]
                            sbrtpoly = np.empty(gdat.listprefhistfluxflux[k][1].size + gdat.listprefhistfluxflux[k][2].size)
                            sbrtpoly[:gdat.listprefhistfluxflux[k][1].size] = gdat.listprefhistfluxhist[k][1]
                            sbrtpoly[gdat.listprefhistfluxflux[k][1].size:] = gdat.listprefhistfluxhist[k][2][::-1]
                            axis.fill(enerpoly, sbrtpoly, color='m', alpha=0.5)
                        else:
                            axis.errorbar(gdat.listprefhistfluxflux[k], gdat.listprefhistfluxhist[k], label=gdat.listprefhistfluxlabl[k], color='m')
            except:
                pass
        if strgydat.startswith('histcntp'):
            # overlay the observed-count histogram as black bars
            ydattemp = getattr(gmodstat, strgydat)
            axis.bar(xdattemp, ydattemp, deltxdat, color='black', label='Data', alpha=gdat.alphhist, linewidth=1, edgecolor='black')
    # axis scales
    if scalxdat == 'logt':
        axis.set_xscale('log')
    if scalydat == 'logt':
        # only switch to log when there is at least one positive value
        if np.where(ydat > 0.)[0].size > 0:
            axis.set_yscale('log')
    # axis labels
    axis.set_xlabel(lablxdat)
    axis.set_ylabel(lablydat)
    # superimpose prior on the feature
    ptch = None
    line = None
    if strgydat.startswith('hist') and strgydat != 'histdefl' and strgydat != 'histdeflelem' and boolhistprio:
        if strgydat[-8:-5] == 'pop':
            strgtemp = strgydat[4:-8]
            if strgtemp in gmod.namepara.genrelem[int(strgydat[-5])]:
                xdatprio = getattr(gmod, strgxdat + 'prio')
                if gdat.typedata == 'mock' and not omittrue:
                    for q in gdat.indxrefr:
                        if gdat.refr.numbelem[q] == 0:
                            continue
                        if strgtemp in gmod.namepara.genrelem[q]:
                            truexdatprio = getattr(gdat.true, strgxdat + 'prio')
                            # NOTE(review): trueydatsupr is immediately overwritten
                            # by the retr_fromgdat call below — dead getattr.
                            trueydatsupr = getattr(gdat.true, strgydat + 'prio')
                            trueydatsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'true', strgydat + 'prio', strgpdfn)
                            axis.plot(truexdatprio, trueydatsupr, ls='-', alpha=gdat.alphline, color=gdat.refr.colrelem[q])
                if strgmodl != 'true':
                    ydatsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'fitt', strgydat + 'prio', strgpdfn)
                    if strgstat == 'pdfn':
                        yerrsupr = retr_fromgdat(gdat, gdatmodi, strgstat, 'fitt', strgydat + 'prio', strgpdfn, strgmome='errr')
                        labl = gdat.lablsampdist + ' hyper-distribution'
                        # NOTE(review): `labltotl` is undefined in this scope —
                        # presumably meant `labl`; confirm before fixing.
                        ptch, line = tdpy.plot_braz(axis, xdatprio, ydatsupr, yerr=yerrsupr, lcol='lightgrey', dcol='grey', labltotl=labltotl)
                    else:
                        axis.plot(xdatprio, ydatsupr, ls='--', alpha=gdat.alphline, color=gmod.colrelem[int(strgydat[-5])])
    # overlay any externally supplied reference histograms stored on gdat.refr
    # NOTE(review): `indxpopltemp` is undefined here — raises NameError if any
    # attribute name matches the pattern; confirm intended variable.
    for name, valu in gdat.refr.__dict__.items():
        if name[8:12] == 'hist' and name[12:16] == strgydat[4:] and name[16:19] == 'pop' and int(name[-1]) == indxpopltemp:
            colr = getattr(gdat, name + 'colr')
            linestyl = getattr(gdat, name + 'linestyl')
            axis.plot(valu[0, :], valu[1, :], ls=linestyl, color=colr)
    if strgydat.startswith('hist') and strgydat[4:-8] == 'deltllik':
        plot_sigmcont(gdat, strgmodl, axis, strgxdat[4:], int(strgydat[-1]))
    # append index suffixes so the output filename is unique per slice
    if indxydat is not None:
        strgydat += strgindxydat
    if indxxdat is not None:
        strgxdat += strgindxxdat
    if limtxdat is not None:
        axis.set_xlim(limtxdat)
    else:
        axis.set_xlim([np.amin(xdat), np.amax(xdat)])
    if limtydat is not None:
        axis.set_ylim([limtydat[0], limtydat[1]])
    else:
        axis.set_ylim([np.amin(ydat), np.amax(ydat)])
    if ydattype != 'totl':
        strgydat += ydattype
    try:
        make_legd(axis, offs=offslegd, ptch=ptch, line=line)
    except:
        # surface the failing plot identity before re-raising
        print('Legend failed when')
        print('strgstat')
        print(strgstat)
        print('strgmodl')
        print(strgmodl)
        print('strgydat')
        print(strgydat)
        raise Exception('')
    plt.tight_layout()
    if typehist == 'histcorrreca':
        path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'histcorrreca' + strgydat[4:], nameinte=nameinte)
    else:
        path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgydat, nameinte=nameinte)
    savefigr(gdat, gdatmodi, figr, path)
    plt.close(figr)
def plot_scatassc(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, q, l, strgfeat, plotdiff=False):
    '''
    Scatter plot of a feature of model elements associated with reference
    population q (model population l) against the reference values, either as
    absolute values or as percent differences (plotdiff=True).
    '''
    if plotdiff:
        figrsize = (gdat.plotsize, 0.7 * gdat.plotsize)
    else:
        figrsize = (gdat.plotsize, gdat.plotsize)
    figr, axis = plt.subplots(1, 1, figsize=figrsize)
    # prepare data to be plotted
    # reference values on the x axis; row 0 holds the central values
    xdat = np.copy(getattr(gdat.refr, strgfeat)[q][0, :])
    xerr = tdpy.retr_errrvarb(getattr(gdat.refr, strgfeat)[q])
    ydat = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgfeat + 'asscpop%dpop%d' % (q, l), strgpdfn)
    yerr = np.zeros((2, ydat.size))
    if strgstat == 'pdfn':
        yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgfeat + 'asscpop%dpop%d' % (q, l), strgpdfn, strgmome='errr')
    if plotdiff:
        # percent difference relative to the reference value
        ydat = 100. * (ydat - xdat) / xdat
    # handle the case when there is a single reference element
    if yerr.ndim == 1:
        ydat = np.array([ydat])
        yerr = yerr[:, None]
    # plot all associations
    # keep only meaningful points (diffs above -100%, or strictly positive values)
    if plotdiff:
        indx = np.where(ydat > -100.)[0]
    else:
        indx = np.where(ydat > 0.)[0]
    if indx.size > 0:
        axis.errorbar(xdat[indx], ydat[indx], ls='', yerr=yerr[:, indx], xerr=xerr[:, indx], lw=1, marker='o', markersize=5, color='black')
    # temp -- plot associations inside the comparison area
    if plotdiff:
        axis.axhline(0., ls='--', alpha=gdat.alphline, color='black')
    else:
        # NOTE(review): `binsplot` is undefined in this scope — NameError on this
        # branch; presumably the feature's bin grid. Confirm intended source.
        axis.plot(binsplot, binsplot, ls='--', alpha=gdat.alphline, color='black')
    # NOTE(review): `gmod` is undefined here (no getattr(gdat, strgmodl) in this
    # function) — these label lookups would raise NameError; confirm.
    lablxdat = getattr(gmod.lablpara, strgfeat + 'refr')
    lablydat = getattr(gmod.lablpara, strgfeat + 'paragenrscalfull')
    axis.set_xlabel(lablxdat)
    axis.set_ylabel(lablydat)
    boollogtxaxi = False
    boollogtyaxi = False
    # NOTE(review): `scal` is not a parameter of this function — undefined name;
    # likely meant to be looked up from the feature's scale. Confirm.
    if indx.size > 0 and scal == 'logt':
        if not plotdiff:
            axis.set_yscale('log')
            boollogtyaxi = True
        axis.set_xscale('log')
        # NOTE(review): sets boollogtaxis (typo) instead of boollogtxaxi, so the
        # x-offset logic below never sees the log flag.
        boollogtaxis = True
    if plotdiff:
        limtydat = np.array([-100., 100.])
    else:
        # NOTE(review): `minmplot`/`maxmplot` are undefined in this scope.
        limtydat = np.array([minmplot, maxmplot])
    limtxdat = [minmplot, maxmplot]
    # overplot text
    if 'etag' in gdat.refr.namepara.elem[q]:
        for k in range(indx.size):
            # offset each tag slightly from its point, scaled to the axis range
            if boollogtxaxi:
                sizexoff = 0.01 * xdat[indx[k]]
            else:
                sizexoff = 0.01 * (limtxdat[1] - limtxdat[0])
            if boollogtyaxi:
                sizeyoff = 0.01 * ydat[indx[k]]
            else:
                sizeyoff = 0.01 * (limtydat[1] - limtydat[0])
            axis.text(xdat[indx[k]] + sizexoff, ydat[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], verticalalignment='center', horizontalalignment='center', \
                      color='red', fontsize=1)
    axis.set_ylim(limtydat)
    axis.set_xlim(limtxdat)
    plt.tight_layout()
    if plotdiff:
        strgtype = 'diff'
    else:
        strgtype = ''
    path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, 'scatassc' + strgfeat + '%spop%dpop%d' % (strgtype, q, l), nameinte='assc')
    savefigr(gdat, gdatmodi, figr, path)
    plt.close(figr)
def plot_scatcntp(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, indxevttplot, indxenerplot=None):
    '''
    Scatter plot of model counts against data counts, per pixel, for one event
    type and either one energy bin (indxenerplot given) or all energies.
    Saves the figure via retr_plotpath/savefigr and closes it.
    '''
    gmod = getattr(gdat, strgmodl)
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    cntpmodl = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'cntpmodl', strgpdfn)
    # slice out the requested event type, optionally restricted to one energy bin
    if indxenerplot is None:
        nameplot = 'scatcntpevt%d' % (indxevttplot)
        xdat = gdat.cntpdata[:, :, indxevttplot].flatten()
        ydat = cntpmodl[:, :, indxevttplot].flatten()
        indxvarb = [slice(None), slice(None), indxevttplot]
    else:
        nameplot = 'scatcntpen%02devt%d' % (indxenerplot, indxevttplot)
        xdat = gdat.cntpdata[indxenerplot, :, indxevttplot]
        ydat = cntpmodl[indxenerplot, :, indxevttplot]
        indxvarb = [indxenerplot, slice(None), indxevttplot]
    colr = gmod.colr
    # posterior samples get error bars; single states get plain markers
    if strgstat == 'pdfn':
        yerr = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, 'cntpmodl', strgpdfn, strgmome='errr', indxvarb=indxvarb)
        axis.errorbar(xdat, ydat, yerr=yerr, marker='o', ls='', markersize=5, color=colr, capsize=5)
    else:
        axis.plot(xdat, ydat, marker='o', ls='', markersize=5, color=colr)
    # common limits on both axes so the identity line is the diagonal
    gdat.limtcntpdata = [gdat.binspara.cntpdata[0], gdat.binspara.cntpdata[-1]]
    axis.set_xlim(gdat.limtcntpdata)
    axis.set_ylim(gdat.limtcntpdata)
    axis.set_ylabel('$k^{modl}$')
    axis.set_xlabel('$k^{data}$')
    axis.set_xscale('log')
    axis.set_yscale('log')
    plt.tight_layout()
    path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
    savefigr(gdat, gdatmodi, figr, path)
    plt.close(figr)
def plot_indxprox(gdat):
    '''
    Plot log-scale histograms of the sizes of the pixel proximity look-up
    tables, one histogram per flux bin, and save to init/indxprox.pdf.

    Parameters
    ----------
    gdat : global data object carrying indxpixlprox, numbprox, indxprox,
        numbpixlfull, indxpixlfull, alphhist, plotsize and pathplotrtag
    '''
    numbbins = 40
    numbfluxprox = len(gdat.indxpixlprox)
    # NOTE(review): bins is sized with gdat.numbprox while indxpixlproxsize uses
    # numbfluxprox — presumably equal; confirm.
    bins = np.empty((gdat.numbprox, numbbins + 1))
    indxpixlproxsize = np.empty((numbfluxprox, gdat.numbpixlfull))
    for h in gdat.indxprox:
        for j in gdat.indxpixlfull:
            try:
                indxpixlproxsize[h, j] = gdat.indxpixlprox[h][j].size
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception. Entries that
            # are not arrays (sentinel meaning "all pixels") fall through here.
            except Exception:
                indxpixlproxsize[h, j] = gdat.numbpixlfull
        bins[h, :] = np.logspace(np.log10(np.amin(indxpixlproxsize[h, :])), np.log10(np.amax(indxpixlproxsize[h, :])), numbbins + 1)
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    for h in gdat.indxprox:
        axis.hist(indxpixlproxsize[h, :], bins=bins[h, :], log=True, label='Flux bin %d' % h, alpha=gdat.alphhist)
    axis.set_xscale('log')
    # the full ROI size is the upper bound on any table
    axis.axvline(gdat.numbpixlfull, label='ROI', ls='--')
    axis.set_xlabel('Number of pixels')
    axis.set_ylabel("Number of tables")
    make_legd(axis)
    plt.tight_layout()
    figr.savefig(gdat.pathplotrtag + 'init/indxprox.pdf')
    plt.close()
def plot_psfn_type():
    '''
    Plot the five canonical PSF radial profiles (single/double Gaussian, single/
    double King and Gaussian+King) on log-log axes for comparison.
    '''
    devi = np.linspace(0., 5., 100)
    # BUG FIX: removed `y = np.zeros((x.size, 5))` — `x` was undefined
    # (NameError) and `y` was never used anywhere in the function.
    # NOTE(review): `gdat` is not defined in this scope — presumably a module
    # global when this debug helper is invoked; confirm.
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    singgaus = retr_singgaus(devi, 0.25)
    axis.plot(devi, singgaus, label='Single Gaussian')
    singking = retr_singking(devi, 0.25, 10.)
    axis.plot(devi, singking, label='Single King')
    doubgaus = retr_doubgaus(devi, 0.1, 0.25, 1.)
    axis.plot(devi, doubgaus, label='Double Gaussian')
    gausking = retr_gausking(devi, 0.1, 0.25, 1., 10.)
    axis.plot(devi, gausking, label='Gaussian + King')
    doubking = retr_doubking(devi, 0.1, 0.25, 10., 1., 5.)
    axis.plot(devi, doubking, label='Double King')
    make_legd(axis)
    axis.set_xscale('log')
    axis.set_yscale('log')
    axis.set_ylim([1e-3, None])
    # NOTE(review): the figure is never saved or closed — confirm whether the
    # caller handles that.
def plot_evidtest():
    '''
    Visualize the log Bayesian evidence in favor of the lower-dimensional model
    over a 2D grid of information gain (x) and goodness of fit (y), and save
    the figure to evidtest.pdf.
    '''
    # grid boundaries for the two quantities
    gainminm, gainmaxm = -1., 5.
    deviminm, devimaxm = 0., 5.
    gain = np.linspace(gainminm, gainmaxm, 100)
    devi = np.linspace(deviminm, devimaxm, 100)
    # log evidence on the grid; the exponential of twice the gain is shared by
    # both factors, so compute it once
    factexpo = np.exp(2. * gain[None, :])
    evid = np.log(np.sqrt(1. + factexpo) * np.exp(-devi[:, None]**2 / 2. / (1. + 1. / factexpo)))
    # NOTE(review): `gdat` is not defined in this scope — presumably a module
    # global when this debug helper is invoked; confirm.
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    figr.suptitle('Log-Bayesian Evidence For Lower-Dimension Model', fontsize=18)
    imag = axis.imshow(evid, extent=[gainminm, gainmaxm, deviminm, devimaxm], cmap='winter', origin='lower')
    cset1 = plt.contourf(gain, devi, evid, cmap='winter')
    axis.set_xlabel('Information gain')
    axis.set_ylabel('Goodness of fit')
    plt.colorbar(imag, ax=axis, fraction=0.03)
    plt.tight_layout()
    figr.savefig(gdat.pathplotrtag + 'evidtest.pdf')
    plt.close(figr)
def plot_histlgalbgalelemstkd(gdat, strgpdfn, indxpoplplot, strgbins, strgfeat=None):
    '''
    Plot the stacked 2D (lgal, bgal) element histogram for one population,
    either cumulated over all feature bins (strgbins == 'cumu') or as a grid of
    panels over feature bins ('full' or the coarse default), with reference
    elements superimposed. Saves one PDF per call.

    Parameters
    ----------
    gdat : global data object
    strgpdfn : sampling-phase string used to locate the stacked histogram and
        the output directory
    indxpoplplot : index of the population to plot
    strgbins : 'cumu', 'full', or anything else for the coarse 2-row layout
    strgfeat : optional feature name whose bins define the panels
    '''
    if strgfeat is not None:
        numbparaplot = gdat.numbbinsplot
    else:
        numbparaplot = 1
    if strgbins == 'cumu':
        numbrows = 1
        numbcols = 1
    else:
        numbcols = 2
        if strgbins == 'full':
            # BUG FIX: was `numbparaplot / 2`, which yields a float under
            # Python 3 and makes plt.subplots fail; use floor division.
            numbrows = numbparaplot // 2
        else:
            numbrows = 2
    histlgalbgalelemstkd = getattr(gdat, strgpdfn + 'histlgalbgalelemstkd')
    figr, axgr = plt.subplots(numbrows, numbcols, figsize=(numbcols * gdat.plotsize, numbrows * gdat.plotsize), sharex='all', sharey='all')
    # normalize the axes container to a 2D list-of-rows shape
    if numbrows == 1:
        axgr = [axgr]
    for a, axrw in enumerate(axgr):
        if numbcols == 1:
            axrw = [axrw]
        for b, axis in enumerate(axrw):
            if strgfeat is not None:
                # panel index determines the slice of feature bins to stack
                h = a * 2 + b
                if strgbins == 'full':
                    indxlowr = h
                    indxuppr = h + 1
                elif strgbins == 'cumu':
                    indxlowr = 0
                    indxuppr = numbparaplot
                else:
                    if h < 3:
                        indxlowr = 2 * h
                        indxuppr = 2 * (h + 1)
                    else:
                        indxlowr = 2 * h
                        indxuppr = numbparaplot
                temp = np.sum(histlgalbgalelemstkd[indxpoplplot][:, :, indxlowr:indxuppr], 2).T
            else:
                temp = np.sum(np.sum(histlgalbgalelemstkd[indxpoplplot], 2), 2).T
            # log color scale only when there is something positive to show
            if np.where(temp > 0.)[0].size > 0:
                imag = axis.imshow(temp, interpolation='nearest', origin='lower', cmap='BuPu', \
                                   extent=gdat.exttrofi, norm=mpl.colors.LogNorm(vmin=0.5, vmax=None))
            else:
                imag = axis.imshow(temp, interpolation='nearest', origin='lower', cmap='BuPu', extent=gdat.exttrofi)
            if strgfeat is not None:
                bins = getattr(gdat.binspara, strgfeat)
            # superimpose reference elements
            for q in gdat.indxrefr:
                if gdat.refr.numbelem[q] == 0:
                    continue
                # temp -- backcomp
                reframpl = getattr(gdat.refr, gdat.refr.nameparagenrelemampl[q])
                if strgfeat in gdat.refr.namepara.elem[q]:
                    refrfeat = getattr(gdat.refr, strgfeat)[q]
                    if len(refrfeat) > 0:
                        # only reference elements whose feature falls in this panel's bin range
                        indxelem = np.where((bins[indxlowr] < refrfeat[0, :]) & (refrfeat[0, :] < bins[indxuppr]))[0]
                    else:
                        indxelem = np.array([])
                else:
                    indxelem = np.arange(gdat.refr.numbelem[q])
                # temp -- backcomp
                # NOTE(review): `strgmodl` is undefined in this function, so both
                # the try and the identical except call would raise NameError;
                # left untouched pending confirmation of the intended model name.
                try:
                    mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl[q][0, indxelem], gdat.refr.nameparagenrelemampl[q])
                except:
                    mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl[q][0, indxelem], gdat.refr.nameparagenrelemampl[q])
                if indxelem.size > 0:
                    axis.scatter(gdat.anglfact * gdat.refr.dictelem[q]['lgal'][0, indxelem], gdat.anglfact * gdat.refr.dictelem[q]['bgal'][0, indxelem], \
                                 s=mrkrsize, alpha=gdat.alphelem, marker=gdat.refrlistmrkrhits[q], lw=2, color=gdat.refr.colrelem[q])
            # axis labels only on the outer edge of the panel grid
            if a == numbrows - 1:
                axis.set_xlabel(gdat.labllgaltotl)
            else:
                axis.set_xticklabels([])
            if b == 0:
                axis.set_ylabel(gdat.lablbgaltotl)
            else:
                axis.set_yticklabels([])
            draw_frambndr(gdat, axis)
            if strgbins != 'cumu':
                # NOTE(review): `lablfeat` is undefined in this scope — likely
                # meant the feature's label; confirm before fixing.
                titl = tdpy.mexp(bins[indxlowr]) + ' < $%s$ < ' % lablfeat + tdpy.mexp(bins[indxuppr])
                axis.set_title(titl)
    if strgfeat is not None:
        # NOTE(review): `gmod` is undefined in this scope; confirm source of labels.
        lablfeattotl = getattr(gmod.lablpara, strgfeat + 'totl')
        plt.figtext(0.5, 0.95, '%s' % lablfeattotl, ha='center', va='center')
    axiscomm = figr.add_axes([0.87, 0.2, 0.02, 0.6])
    cbar = figr.colorbar(imag, cax=axiscomm)
    plt.subplots_adjust()
    #plt.subplots_adjust(left=0.18, top=.9, right=0.82, bottom=0.15, hspace=0.08, wspace=0.08)
    if strgbins == 'cumu':
        strgtemp = ''
    else:
        strgtemp = strgfeat
    path = getattr(gdat, 'path' + strgpdfn + 'finl') + 'histlgalbgalelemstkd%s%spop%d' % (strgbins, strgtemp, indxpoplplot) + '.pdf'
    figr.savefig(path)
    plt.close(figr)
def plot_king(gdat):
    '''
    Plot the King profile for a range of sigma and gamma parameters in two
    panels (varying gamma at fixed sigma, and vice versa) and save to king.pdf.

    Parameters
    ----------
    gdat : global data object carrying binspara.angl, plotsize, labltotlpara
        and pathplotrtag
    '''
    # NOTE(review): `rad2deg` is used unqualified — presumably numpy's rad2deg
    # brought into scope elsewhere in this module; confirm.
    angl = rad2deg(gdat.binspara.angl)
    figr, axgr = plt.subplots(1, 2, figsize=(2 * gdat.plotsize, gdat.plotsize))
    figr.suptitle('King Function', fontsize=20)
    for k, axis in enumerate(axgr):
        # left panel: vary gamma at fixed sigma; right panel: vary sigma at fixed gamma
        if k == 0:
            sigmlist = [0.25]
            gammlist = [1.01, 2.5, 10.]
        else:
            sigmlist = [0.1, 0.25, 1.]
            gammlist = [2.]
        for sigm in sigmlist:
            for gamm in gammlist:
                axis.plot(angl, retr_singking(angl, sigm, gamm), label=r'$\sigma = %.4g, \gamma = %.3g$' % (sigm, gamm))
        make_legd(axis)
        axis.set_yscale('log')
        axis.set_xlabel(gdat.labltotlpara.gang)
        # BUG FIX: this was a second set_xlabel, silently overwriting the angle
        # label above; the King-function amplitude belongs on the y axis.
        axis.set_ylabel(r'$\mathcal{K}$')
    plt.tight_layout()
    figr.savefig(gdat.pathplotrtag + 'king.pdf')
    plt.close(figr)
def plot_intr(gdat):
    '''
    Make the introductory PCAT plots: the graphical-model diagrams and an
    xkcd-styled cartoon of the catalog-space probability distribution, saved
    to talkintr.pdf on a black background.

    Parameters
    ----------
    gdat : global data object carrying typeverb, plotsize and pathimag
    '''
    if gdat.typeverb > 0:
        print('Making PCAT introductory plots...')
    #plot_grap(plottype='meta', typeverb=1)
    plot_grap(plottype='lght0000', typeverb=1)
    #plot_grap(plottype='lght0001', typeverb=1)
    #plot_grap(plottype='lght0002', typeverb=1)
    #plot_grap(plottype='lght0003', typeverb=1)
    #plot_grap(plottype='lens0000', typeverb=1)
    plot_grap(plottype='lens0001', typeverb=1)
    with plt.xkcd():
        from matplotlib import patheffects
        mpl.rcParams['path.effects'] = [patheffects.withStroke(linewidth=0)]
        figr, axis = plt.subplots(figsize=(2 * gdat.plotsize, gdat.plotsize))
        catl = np.arange(80)
        # NOTE(review): `pss` is used unqualified — presumably scipy.stats.poisson
        # aliased elsewhere in this module; confirm.
        probcatl = pss.pmf(catl, 30.) + 0.5 * pss.pmf(catl, 60.)
        axis.plot(catl, probcatl)
        axis.set_xticks([10, 30, 60])
        axis.set_xticklabels(["Crackpot's Catalog", "Best-fit catalog", "Not-so-best-fit catalog"])
        axis.set_yticks([])
        text = axis.set_title("Exploring the catalog space with Probabilistic cataloging")
        text.set_position([.5, 1.05])
        axis.set_xlabel('Catalog index')
        axis.set_ylabel("Probability")
        axis.tick_params(axis='x', colors='#B6E954')
        axis.tick_params(axis='y', colors='#B6E954')
        axis.spines['bottom'].set_color('#B6E954')
        axis.spines['top'].set_color('#B6E954')
        axis.spines['right'].set_color('#B6E954')
        axis.spines['left'].set_color('#B6E954')
        axis.yaxis.label.set_color('#B6E954')
        axis.xaxis.label.set_color('#B6E954')
        axis.title.set_color('#B6E954')
        # BUG FIX: Axes.set_axis_bgcolor was deprecated in matplotlib 2.0 and
        # removed in 2.2; set_facecolor is the replacement.
        axis.set_facecolor('black')
        figr.set_facecolor('black')
        plt.tight_layout()
        figr.savefig(gdat.pathimag + 'talkintr.pdf', facecolor=figr.get_facecolor())
        plt.close()
def plot_psfn(gdat, gdatmodi, strgstat, strgmodl, strgpdfn):
    '''
    Plot the point spread function scaled by each proximity flux bin, per
    energy bin and event type, highlighting the dimmest and brightest point
    sources, and save one psfn*.pdf per (energy, event type) pair.
    '''
    gmod = getattr(gdat, strgmodl)
    gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
    gmodstat = getattr(gdatobjt, strgstat)
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    for i in gdat.indxener:
        for m in gdat.indxevtt:
            for k in range(gdat.numbprox + 1):
                # first and last proximity bins bracket the flux range and are
                # drawn opaque in blue; intermediate bins are faint black
                if k == 0 or k == gdat.numbprox:
                    alph = 1.
                    colr = 'b'
                    if k == 0:
                        labl = 'Dimmest PS'
                    else:
                        labl = 'Brightest PS'
                else:
                    alph = 0.2
                    labl = None
                    colr = 'black'
                # PSF profile scaled by the flux of proximity bin k
                axis.plot(gdat.binspara.angl * gdat.anglfact, gdat.binspara.prox[k] * gmodstat.psfn[i, :, m], label=labl, color=colr, alpha=alph)
                axis.set_xlim([np.amin(gdat.binspara.angl) * gdat.anglfact, np.amax(gdat.binspara.angl) * gdat.anglfact])
                # mark the evaluation radius for this flux bin
                if k > 0:
                    axis.axvline(gdat.anglfact * gdat.maxmangleval[k-1], ls='--', alpha=alph, color=colr)
            axis.set_yscale('log')
            axis.set_xlabel(gdat.labltotlpara.gang)
            axis.set_ylabel(gdat.lablsbrttotl)
            # flux floor below which the PSF evaluation is truncated
            limt = gdat.specfraceval * np.amax(gdat.binspara.prox[0] * gmodstat.psfn[i, :, m])
            if limt != 0.:
                axis.axhline(limt, color='red', ls=':', label='Flux floor')
            make_legd(axis)
            plt.tight_layout()
            name = 'psfn'
            # suffix the filename only when there are multiple energies/event types
            if gdat.numbener > 1:
                name += 'en%02d' % i
            if gdat.numbevtt > 1:
                name += 'evt%d' % m
            figr.savefig(gdat.pathinit + name + '.pdf')
            # NOTE(review): the single figure created before the loops is closed
            # here on the first (i, m) iteration — subsequent iterations draw on
            # a closed figure; confirm whether subplots should be inside the loop.
            plt.close(figr)
def plot_mosa(gdat, strgpdfn):
    '''
    Plot a 3x2 mosaic of count maps for randomly chosen posterior samples, one
    figure per (population, energy, event type), with the sampled model state
    superimposed on the data. Skips (with a message) when fewer than six total
    samples are available.
    '''
    # empty global object
    gdatmodi = tdpy.gdatstrt()
    listparagenrscalfull = getattr(gdat, 'list' + strgpdfn + 'paragenrscalfull')
    listparagenrunitfull = getattr(gdat, 'list' + strgpdfn + 'paragenrunitfull')
    numbrows = 3
    numbcols = 2
    numbsampmosa = numbrows * numbcols
    if numbsampmosa <= gdat.numbsamptotl:
        # pick distinct samples to fill the mosaic
        indxsampmosa = np.random.choice(gdat.indxsamptotl, size=numbsampmosa, replace=False)
        # NOTE(review): `gmod` is undefined in this scope (no
        # getattr(gdat, 'fitt') here) — gmod.indxpopl/numbparaelem below would
        # raise NameError; confirm intended model object.
        for l in gmod.indxpopl:
            for i in gdat.indxener:
                for m in gdat.indxevttplot:
                    figr, axgr = plt.subplots(numbrows, numbcols, figsize=(numbcols * gdat.plotsize, numbrows * gdat.plotsize))
                    for a, axrw in enumerate(axgr):
                        for b, axis in enumerate(axrw):
                            # load the chosen sample into the scratch state and re-process it
                            n = indxsampmosa[numbcols*a+b]
                            gdatmodi.this.paragenrscalfull = listparagenrscalfull[n, :].flatten()
                            gdatmodi.this.paragenrunitfull = listparagenrunitfull[n, :].flatten()
                            if gmod.numbparaelem > 0:
                                gdatmodi.this.indxelemfull = getattr(gdat, 'list' + strgpdfn + 'indxelemfull')[n]
                                proc_samp(gdat, gdatmodi, 'this', 'fitt')
                            # axis labels only on the outer edge of the grid
                            if a == numbrows - 1:
                                axis.set_xlabel(gdat.labllgaltotl)
                            else:
                                axis.set_xticklabels([])
                            if b == 0:
                                axis.set_ylabel(gdat.lablbgaltotl)
                            else:
                                axis.set_yticklabels([])
                            imag = retr_imag(gdat, axis, gdat.cntpdata, '', 'fitt', 'cntpdata', i, m)
                            supr_fram(gdat, gdatmodi, 'this', 'fitt', axis, l)
                    if gdat.boolbinsener:
                        plt.figtext(0.5, 0.93, gdat.strgener[i], ha='center', va='center')
                    # shared colorbar on the right edge
                    axiscomm = figr.add_axes([0.92, 0.1, 0.02, 0.8])
                    cbar = figr.colorbar(imag, cax=axiscomm)
                    cbar.set_ticks(gdat.valutickmajrpara.cntpdata)
                    cbar.set_ticklabels(gdat.labltickmajrpara.cntpdata)
                    plt.subplots_adjust()
                    #plt.subplots_adjust(left=0.1, top=.91, hspace=0.03, wspace=0.1, bottom=0.09)
                    if l == 1:
                        strg = ''
                    else:
                        strg = 'pop%d' % l
                    pathfinl = getattr(gdat, 'path' + strgpdfn + 'finl')
                    # 'A' suffix means averaged over event types
                    if m is None:
                        path = pathfinl + 'mosa' + strg + 'en%02dA.pdf' % (gdat.indxenerincl[i])
                    else:
                        path = pathfinl + 'mosa' + strg + 'en%02devtt%d.pdf' % (gdat.indxenerincl[i], gdat.indxevttincl[m])
                    figr.savefig(path)
                    plt.close(figr)
    else:
        if gdat.typeverb > 0:
            print('Skipping the mosaic plot...')
def plot_grap(plottype, typeverb=0):
    '''
    Draw the directed graphical model (plate diagram) for one of the predefined
    model variants ('meta', 'lght0000'..'lght0003', 'lens0000', 'lens0001')
    using networkx, and save it as grap<plottype>.pdf.

    Parameters
    ----------
    plottype : string selecting which variant's nodes/edges/colors to draw
    typeverb : when positive, print each edge with its assigned color
    '''
    import networkx as nx
    figr, axis = plt.subplots(figsize=(6, 6))
    grap = nx.DiGraph()
    # per-variant edge colors, listed in the same order edges are added below
    if plottype == 'meta':
        listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'magenta']
    if plottype == 'lens0001':
        listcolr = ['olive', 'olive', 'black', 'magenta', 'magenta', 'magenta', 'magenta', 'magenta', 'olive', 'olive', 'olive', 'olive', 'olive', \
                    r'black', 'olive', 'black']
    if plottype == 'lght0000':
        listcolr = [r'olive', r'black', r'magenta', r'magenta', 'magenta', r'magenta', r'olive', r'olive', r'black', r'olive', r'olive', r'black', r'olive']
    if plottype == 'lght0001':
        listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'olive', 'olive', 'magenta', 'magenta', 'magenta', 'magenta', 'black']
    if plottype == 'lght0002':
        listcolr = ['black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', 'olive', 'olive', 'olive', 'magenta', \
                    'magenta', 'magenta', 'magenta', 'magenta', 'black']
    if plottype == 'lght0003':
        listcolr = ['black', 'black', 'black', 'olive', 'black', 'olive', 'olive', 'black', 'olive', \
                    'olive', 'olive', 'magenta', 'magenta', 'magenta', 'magenta']
    if plottype == 'lens0000':
        listcolr = ['olive', 'black', 'black', 'olive', 'olive', 'olive', 'olive', 'black', 'olive', 'magenta', 'magenta', 'magenta']
    # edges shared by the meta diagram
    if plottype.startswith('meta'):
        grap.add_edges_from([ \
                             ('meanelem', 'numbelem'), \
                             ('modl','data'), \
                             ('psfp', 'modl'), \
                             ('feat','modl'), \
                             ('numbelem','feat'), \
                             ('amplslop', 'ampl'), \
                            ])
    # edges shared by all light/lensing variants
    if plottype.startswith('lght') or plottype.startswith('lens'):
        grap.add_edges_from([ \
                             ('meanelem', 'numbelem'), \
                             ('modl','data'), \
                             ('psfp', 'modl'), \
                             ('bacp', 'modl'), \
                             ('lgal','modl'), \
                             ('bgal','modl'), \
                             ('numbelem','lgal'), \
                             ('numbelem','bgal'), \
                            ])
    if plottype.startswith('lght'):
        grap.add_edges_from([ \
                             ('amplslop', 'ampl'), \
                             ('ampl', 'modl'), \
                             ('numbelem','ampl'), \
                             ('numbelem', 'sind'), \
                             ('sind','modl'), \
                            ])
    if plottype.startswith('lens'):
        grap.add_edges_from([ \
                             ('lenp', 'modl'), \
                             ('defsslop', 'defs'), \
                             ('defs', 'modl'), \
                             ('numbelem','defs'), \
                            ])
    if plottype == 'lens0001':
        grap.add_edges_from([ \
                             ('asca', 'modl'), \
                             ('numbelem','asca'), \
                             ('acut', 'modl'), \
                             ('numbelem','acut'), \
                            ])
    if plottype == 'lght0001' or plottype == 'lght0002':
        grap.add_edges_from([ \
                             ('sinddistmean', 'sind'), \
                            ])
    if plottype == 'lght0002':
        grap.add_edges_from([ \
                             ('numbelem', 'expc'), \
                             ('expc', 'modl'), \
                            ])
    if plottype == 'lght0003':
        grap.add_edges_from([ \
                             ('spatdistcons', 'lgal'), \
                             ('spatdistcons', 'bgal'), \
                            ])
    # LaTeX node labels
    labl = {}
    if plottype.startswith('lens'):
        nameelem = r'\rm{sub}'
    else:
        nameelem = r'\rm{pts}'
    if plottype.startswith('lght') and (plottype == 'lght0001' or plottype == 'lght0002'):
        labl['numbelem'] = r'$\vec{N}_{%s}$' % nameelem
        labl['meanelem'] = r'$\vec{\mu}_{%s}$' % nameelem
    else:
        labl['numbelem'] = '$N_{%s}$' % nameelem
        labl['meanelem'] = r'$\mu_{%s}$' % nameelem
    if plottype.startswith('lght'):
        if plottype == 'lght0000' or plottype == 'lght0003':
            labl['amplslop'] = r'$\alpha$'
        else:
            labl['amplslop'] = r'$\vec{\alpha}$'
    if plottype.startswith('lens'):
        labl['defsslop'] = r'$\beta$'
    if plottype == 'lght0001' or plottype == 'lght0002':
        labl['sinddistmean'] = r'$\vec{\beta}$'
    if plottype == 'lght0003':
        labl['spatdistcons'] = r'$\gamma$'
    if plottype.startswith('lens'):
        labl['lenp'] = r'$\vec{\chi}$'
    labl['psfp'] = r'$\vec{\eta}$'
    labl['bacp'] = r'$\vec{A}$'
    labl['lgal'] = r'$\vec{\theta_1}$'
    labl['bgal'] = r'$\vec{\theta_2}$'
    if plottype.startswith('meta'):
        labl['feat'] = r'$\vec{\xi}$'
    else:
        if plottype.startswith('lght'):
            labl['sind'] = r'$\vec{s}$'
            labl['ampl'] = r'$\vec{f}$'
        else:
            labl['defs'] = r'$\vec{\alpha_{\rm{s}}}$'
    if plottype == 'lens0001':
        labl['asca'] = r'$\vec{\theta_{\rm{s}}}$'
        labl['acut'] = r'$\vec{\theta_{\rm{c}}}$'
    if plottype == 'lght0002':
        labl['expc'] = r'$\vec{E_{\rm{c}}}$'
    labl['modl'] = r'$M_D$'
    labl['data'] = r'$D$'
    # start from a circular layout, then pin each node to a hand-tuned position
    posi = nx.circular_layout(grap)
    # NOTE(review): this position is set unconditionally even for variants where
    # the 'sinddistmean' node does not exist — harmless but unused for them.
    posi['sinddistmean'] = np.array([0.4, 0.15])
    if plottype == 'lght0003':
        posi['spatdistcons'] = np.array([-0.2, 0.15])
    if plottype.startswith('lght'):
        posi['numbelem'] = np.array([0., 0.075])
        posi['meanelem'] = np.array([0., 0.15])
        posi['amplslop'] = np.array([0.2, 0.15])
    if plottype.startswith('lens'):
        posi['numbelem'] = np.array([-0.1, 0.075])
        posi['meanelem'] = np.array([-0.1, 0.15])
        posi['defsslop'] = np.array([0.1, 0.15])
    if plottype.startswith('lght'):
        if plottype == 'lght0002':
            posi['psfp'] = np.array([0.7, -0.0])
            posi['bacp'] = np.array([0.9, -0.0])
        else:
            posi['psfp'] = np.array([0.5, -0.0])
            posi['bacp'] = np.array([0.7, -0.0])
    if plottype == 'lens0000':
        posi['psfp'] = np.array([0.3, -0.0])
        posi['bacp'] = np.array([0.5, -0.0])
        posi['lenp'] = np.array([0.7, -0.0])
    if plottype == 'lens0001':
        posi['psfp'] = np.array([0.7, -0.0])
        posi['bacp'] = np.array([0.9, -0.0])
        posi['lenp'] = np.array([1.1, -0.0])
    posi['lgal'] = np.array([-0.3, -0.0])
    posi['bgal'] = np.array([-0.1, -0.0])
    if plottype.startswith('lght'):
        posi['ampl'] = np.array([0.1, -0.0])
        posi['sind'] = np.array([0.3, -0.0])
    if plottype == 'lght0002':
        posi['expc'] = np.array([0.5, -0.0])
    if plottype.startswith('lens'):
        posi['defs'] = np.array([0.1, -0.0])
    if plottype == 'lens0001':
        posi['asca'] = np.array([0.3, -0.0])
        posi['acut'] = np.array([0.5, -0.0])
    posi['modl'] = np.array([0., -0.075])
    posi['data'] = np.array([0., -0.15])
    if typeverb > 0:
        numb = max(len(grap.edges()), len(listcolr))
        for k in range(numb):
            # NOTE(review): grap.edges() is not indexable on networkx >= 2.0
            # (EdgeView), so this always prints 'unequal' there; the bare except
            # also masks any other failure.
            try:
                print('%15s %15s %15s' % (grap.edges()[k][0], grap.edges()[k][1], listcolr[k]))
            except:
                print('unequal')
    size = 1000
    # draw edges and nodes in groups so each group gets its own color
    nx.draw(grap, posi, labels=labl, ax=axis, edgelist=[], nodelist=[])
    nx.draw_networkx_edges(grap, posi, ax=axis, labels=labl, edge_color=listcolr)
    nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['modl', 'data'], node_color='grey', node_size=size)
    nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['numbelem'], node_color='b', node_size=size)
    if plottype.startswith('lght'):
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['meanelem', 'amplslop'], node_color='r', node_size=size)
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'ampl', 'sind'], node_color='g', node_size=size)
    if plottype == 'lght0001' or plottype == 'lght0002':
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['sinddistmean'], node_color='r', node_size=size)
    if plottype == 'lght0002':
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['expc'], node_color='g', node_size=size)
    if plottype == 'lght0003':
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['spatdistcons'], node_color='r', node_size=size)
    nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['psfp', 'bacp'], node_color='y', node_size=size)
    if plottype.startswith('lens'):
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['meanelem', 'defsslop'], node_color='r', node_size=size)
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lenp'], node_color='y', node_size=size)
    if plottype == 'lens0000':
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'defs'], node_color='g', node_size=size)
    if plottype == 'lens0001':
        nx.draw_networkx_nodes(grap, posi, ax=axis, labels=labl, nodelist=['lgal', 'bgal', 'defs', 'asca', 'acut'], node_color='g', node_size=size)
    # NOTE(review): `pathpcat` is expected to be a module-level global; confirm.
    pathplot = pathpcat + '/imag/'
    plt.tight_layout()
    figr.savefig(pathplot + 'grap%s.pdf' % plottype)
    plt.close(figr)
def plot_3fgl_thrs(gdat):
    '''
    Plot the Fermi-LAT 3FGL point-source detection flux threshold map, cropped
    to |lgal|, |bgal| < 10 deg, and save to thrs.pdf.

    Parameters
    ----------
    gdat : global data object carrying lgalheal, expo, plotsize, axis labels
        and pathplotrtag
    '''
    path = pathpcat + '/detthresh_P7v15source_4years_PL22.fits'
    fluxthrs = astropy.io.fits.getdata(path, 0)
    # native grids of the threshold map and of the exposure map
    bgalfgl3 = np.linspace(-90., 90., 481)
    lgalfgl3 = np.linspace(-180., 180., 960)
    bgalexpo = np.linspace(-90., 90., 400)
    lgalexpo = np.linspace(-180., 180., 800)
    #fluxthrs = interp2d(lgalfgl3, bgalfgl3, fluxthrs)(lgalexpo, bgalexpo)
    # NOTE(review): this griddata call does not match scipy.interpolate.griddata's
    # (points, values, xi) signature — confirm the intended interpolation.
    fluxthrs = griddata([lgalfgl3, bgalfgl3], fluxthrs, [gdat.lgalheal])
    # indices of the +/-10 deg window in each direction
    jbgal = np.where(abs(bgalexpo) < 10.)[0]
    jlgal = np.where(abs(lgalexpo) < 10.)[0]
    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    axis.set_xlabel(gdat.labllgaltotl)
    axis.set_ylabel(gdat.lablbgaltotl)
    # BUG FIX: the longitude slice used the undefined name `jlghprofi`
    # (NameError); `jlgal`, computed above, is the intended index array.
    # Also removed the unused locals `cntsthrs` and `extent`.
    imag = plt.imshow(fluxthrs[np.amin(jbgal):np.amax(jbgal)+1, np.amin(jlgal):np.amax(jlgal)+1], origin='lower', cmap='Reds', extent=gdat.exttrofi)
    plt.colorbar(imag, fraction=0.05)
    plt.tight_layout()
    figr.savefig(gdat.pathplotrtag + 'thrs.pdf')
    plt.close(figr)
def plot_init(gdat):
    '''
    Make the initial (pre-sampling) diagnostic plots: the element proximity
    regions, the raw mock lensed count map, and the exposure histograms/maps.

    All plots are written as PDFs under gdat.pathinit; nothing is returned.

    Parameters
    ----------
    gdat : global object
        The PCAT global data structure; must already hold the fitting model
        (gdat.fitt), energy/PSF-class indices and exposure arrays.
    '''
    print('Making initial plots...')

    # work against the fitting model
    gmod = gdat.fitt

    # make initial plots
    if gdat.makeplot:

        if gmod.numbparaelem > 0:
            # proximity regions are only meaningful for locally-evaluated
            # element populations on a spatial grid with more than one pixel
            for l in gmod.indxpopl:
                if (gmod.typeelemspateval[l] == 'locl' and gmod.maxmpara.numbelem[l] > 0) and gdat.numbpixl > 1:
                    plot_indxprox(gdat)

        for i in gdat.indxener:
            for m in gdat.indxevtt:
                # raw (unlensed-grid) model count map, only available for
                # mock data generated with a lensing model
                if gdat.typedata == 'mock' and gmod.boollens:
                    figr, axis, path = init_figr(gdat, None, 'post', 'cntpmodlraww', 'this', 'true', i, m, -1)
                    imag = retr_imag(gdat, axis, gmod.cntpmodlraww, 'this', 'true', 'cntpdata', i, m, booltdim=True)
                    make_cbar(gdat, axis, imag, 0, tick=gdat.valutickmajrpara.cntpdata, labltotl=gdat.lablcntpdata)
                    plt.tight_layout()
                    figr.savefig(path)
                    plt.close(figr)

        if gdat.boolcorrexpo:
            gdat.lablnumbpixl = r'$N_{\rm{pix}}$'
            gdat.limtexpo = [gdat.minmpara.expo, gdat.maxmpara.expo]

            if gdat.boolbinsener:
                # mean exposure as a function of energy
                path = gdat.pathinit + 'expototlmean.pdf'
                tdpy.plot_gene(path, gdat.meanpara.ener, gdat.expototlmean, scalxdat='logt', scalydat='logt', lablxdat=gdat.lablenertotl, \
                                                                                        lablydat=gdat.lablexpototl, limtydat=gdat.limtexpo)

            # histogram of the exposure per energy bin and PSF class
            for m in gdat.indxevtt:
                for i in gdat.indxener:
                    figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
                    axis.hist(gdat.expo[i, :, m], gdat.binspara.expo)
                    axis.set_xlabel(gdat.labltotlpara.expo)
                    axis.set_ylabel(gdat.labltotlpara.numbpixl)
                    axis.set_xscale('log')
                    axis.set_yscale('log')
                    plt.tight_layout()
                    # suffix the file name with the energy/PSF-class indices
                    # only when there is more than one of them
                    name = 'histexpo'
                    if gdat.numbener > 1:
                        name += 'en%02d' % i
                    if gdat.numbevtt > 1:
                        name += 'evt%d' % m
                    path = gdat.pathinit + name + '.pdf'
                    figr.savefig(path)
                    plt.close(figr)

            # spatial exposure maps (only meaningful on a multi-pixel grid)
            if gdat.numbpixl > 1:
                for i in gdat.indxener:
                    for m in gdat.indxevtt:
                        figr, axis, path = init_figr(gdat, None, 'post', 'expo', '', '', i, m, -1)
                        imag = retr_imag(gdat, axis, gdat.expo, None, None, 'expo', i, m)
                        make_cbar(gdat, axis, imag, i)
                        plt.tight_layout()
                        figr.savefig(path)
                        plt.close(figr)
def plot_defl(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, \
                        strgvarb='defl', nameparagenrelem='', indxdefl=None, indxpoplplot=-1, multfact=1., indxenerplot=None, indxevttplot=None):
    '''
    Plot the lensing deflection field as a quiver map and save the figure.

    Parameters
    ----------
    gdat, gdatmodi : global/model-iteration objects
    strgstat, strgmodl, strgpdfn : str
        State, model and PDF tags used to retrieve the deflection field.
    strgvarb : str
        Base name of the deflection variable ('defl' by default).
    nameparagenrelem : str
        Optional element-parameter suffix appended to the variable name.
    indxdefl : int or None
        If given, plot only this single deflection component; the output name
        is suffixed with 'sing%04d'.
    indxpoplplot : int
        Population index passed to the frame-superposition helper.
    multfact : float
        Multiplicative factor applied to the field before plotting.
    indxenerplot, indxevttplot : int or None
        Energy / PSF-class slice to plot, if the field is energy-resolved.
    '''
    if indxdefl is not None:
        strgvarb += 'sing'
    strgvarb = strgvarb + nameparagenrelem

    defl = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn)
    defl *= multfact

    if indxenerplot is not None:
        defl = defl[indxenerplot, :, indxevttplot, ...]

    if indxdefl is not None:
        defl = defl[..., indxdefl]
        strgvarb += '%04d' % indxdefl
    # (npixl, 2) -> (side, side, 2) Cartesian grid of (lgal, bgal) components
    defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))

    figr, axis, path = init_figr(gdat, gdatmodi, strgpdfn, strgvarb, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot)
    make_legdmaps(gdat, strgstat, strgmodl, axis)
    draw_frambndr(gdat, axis)

    defllgal = defl[:, :, 0]
    deflbgal = defl[:, :, 1]

    # downsample the grid by this factor so the quiver arrows stay readable
    fact = 4
    ptch = axis.quiver(gdat.anglfact * gdat.lgalgridcart[::fact, ::fact], gdat.anglfact * gdat.bgalgridcart[::fact, ::fact], \
                       gdat.anglfact * defllgal[::fact, ::fact], gdat.anglfact * deflbgal[::fact, ::fact], scale_units='xy', angles='xy', scale=1)
    supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis)

    # a second, argument-less subplots_adjust() call used to follow this one,
    # resetting all margins to rcParams defaults and discarding the explicit
    # layout; removed
    plt.subplots_adjust(left=0.2, bottom=0.15, top=0.75, right=0.85)
    savefigr(gdat, gdatmodi, figr, path)
    plt.close(figr)
def plot_genemaps(gdat, gdatmodi, strgstat, strgmodl, strgpdfn, strgvarb, indxenerplot=None, indxevttplot=-1, strgcbar=None, \
                                                booltdim=False, indxpoplplot=-1, strgmome='pmea'):
    '''
    Generic spatial-map plotter: retrieve the map named strgvarb for the given
    state/model/PDF, render it with a color bar and legend, optionally overlay
    the elements, and save the figure.

    Parameters
    ----------
    gdat, gdatmodi : global/model-iteration objects
    strgstat, strgmodl, strgpdfn : str
        State ('this'/'post'/...), model ('fitt'/'true') and PDF tags.
    strgvarb : str
        Name of the map variable to retrieve and plot.
    indxenerplot, indxevttplot : int or None
        Energy / PSF-class slice to plot.
    strgcbar : str or None
        Name of the color-bar scaling to use; defaults to strgvarb.
    booltdim : bool
        Passed through to retr_imag.
    indxpoplplot : int
        Population index for the element overlay.
    strgmome : str
        Posterior moment tag ('pmea' by default) used in the output file name
        when plotting a posterior map.
    '''
    gmod = getattr(gdat, strgmodl)

    if strgcbar is None:
        strgcbar = strgvarb

    # construct the string for the map
    if strgvarb == 'cntpdata':
        strgplot = strgvarb
    else:
        if strgstat == 'post':
            # posterior maps are prefixed with the moment and PDF tags
            strgtemp = strgmome + strgpdfn
        else:
            strgtemp = ''
        strgplot = strgtemp + strgvarb

    figr, axis, path = init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot)

    maps = retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn)

    imag = retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot, indxevttplot, booltdim=booltdim)

    make_cbar(gdat, axis, imag, strgvarb)
    make_legdmaps(gdat, strgstat, strgmodl, axis)
    if gdat.boolsuprelem:
        supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot)

    # leftover debug prints of strgvarb removed
    plt.tight_layout()
    savefigr(gdat, gdatmodi, figr, path)
    plt.close(figr)
def init( \
# user interaction
## type of verbosity
typeverb=1, \
## path in which PCAT data lives
pathpcat=None, \
# miscelleneaous
## type of PDF to sample from
strgpdfn='post', \
# data
## type of data
### 'mock': simulated data
### 'inpt': input data
### 'real': real data retrieved from databases
typedata=None, \
## type of experiment
typeexpr='user', \
# diagnostics
## Boolean to enter the diagnostic mode
booldiagmode=True, \
## squeeze exposure to check the low sample limit
boolsqzeexpo=False, \
### explode exposure to check the large sample limit
boolexplexpo=False, \
## squeeze proposal scale to check the acceptance ratio
boolsqzeprop=False, \
## explode proposal scale to check the acceptance ratio
boolexplprop=False, \
## Boolean to thin down the data
boolthindata=False, \
## factor by which to thin down the data
factthin=None, \
# reference catalog
## Boolean to use the reference catalogs to associate
boolasscrefr=None, \
# sampling
## Boolean flag to make burn-in tempered
boolburntmpr=False, \
## number of sweeps
numbswep=100000, \
## number of samples
numbsamp=None, \
## number of initial sweeps to be burned
numbburn=None, \
# output
## Boolean to make condensed catalog
boolcondcatl=True, \
refrlabltotl=None, \
refrlablpopl=None, \
fittlablpopl=None, \
# numpy RNG seed
seedtype=0, \
## Boolean flag to re-seed each chain separately
boolseedchan=True, \
## optional deterministic seed for sampling element parameters
seedelem=None, \
indxevttincl=None, \
indxenerincl=None, \
listmask=None, \
# number of samples for Bootstrap
numbsampboot=None, \
listnamefeatsele=None, \
# type of mask for the exposure map
typemaskexpo='ignr', \
# type of exposure
## 'cons': constant
## 'file': provided in a file
typeexpo='cons', \
# maximum spatial distance out to which element kernel will be evaluated
maxmangleval=None, \
# initial state
initpsfprefr=False, \
initpsfp=None, \
# evaluate the likelihood inside circles around elements
typeelemspateval=None, \
namestattrue=None, \
# plotting
## Boolean flag to make the frame plots short
boolshrtfram=True, \
boolrefeforc=False, \
indxrefrforc=None, \
## Boolean to overplot the elements
boolsuprelem=True, \
## Boolean to plot the correlation between elements
boolplotelemcorr=True, \
## Boolean flag to vary the PSF
boolmodipsfn=False, \
# name of the configuration
strgcnfg=None, \
# model
## number of spatial dimensions
numbspatdims=2, \
# hyperparameters
fittampldisttype=None, \
# metamodel settings
## PSF evaluation type
## kernel evaluation type
kernevaltype='ulip', \
# photometric model
## base parameters
### Sersic type
typesers='vauc', \
## transdimensional parameters (elements)
### vary projected scale radius
variasca=True, \
### vary projected cutoff radius
variacut=True, \
# prior
penalpridiff=False, \
priotype='logt', \
priofactdoff=None, \
# initialization
## initialization type
inittype=None, \
loadvaripara=False, \
# save the state of the MCMC
savestat=False, \
namesavestat=None, \
# recover the state from a previous run
namerecostat=None, \
forcsavestat=False, \
# proposals
## Boolean flag to turn on proposals on element parameters
boolpropcomp=True, \
boolpropcova=True, \
propwithsing=True, \
# type of covariance estimation
typeopti='none', \
# modes of operation
## only generate and plot mock data
boolmockonly=False, \
## perform an additional run sampling from the prior
checprio=False, \
strgexprsbrt=None, \
anglassc=None, \
nameexpr=None, \
# likelihood dependent
## exposure map
expo=None, \
lgalprio=None, \
bgalprio=None, \
minmcntpdata=None, \
strgexpo=None, \
# number of processors
numbproc=None, \
# likelihood function
liketype='pois', \
# user-defined likelihood function
retr_llik=None, \
anlytype=None, \
lgalcntr=0., \
bgalcntr=0., \
maxmangl=None, \
# spatial grid
## type of spatial pixelization
typepixl=None, \
## Boolean flag to force Cartesian spatial grid
boolforccart=False, \
# number of pixels on a side in the Cartesian grid
numbsidecart=None, \
# Nside in Healpix
numbsideheal=256, \
allwfixdtrue=True, \
asscmetrtype='dist', \
# plotting
numbswepplot=None, \
# Boolean flagt to make the frame plots only for the central energy and PSF bin
boolmakeframcent=True, \
makeplot=True, \
makeplotinit=True, \
makeplotfram=True, \
makeplotfinlprio=True, \
makeplotfinlpost=True, \
makeplotintr=False, \
scalmaps='asnh', \
makeanim=True, \
strgenerfull=None, \
strgexprname=None, \
strganglunit=None, \
strganglunittext=None, \
anglfact=None, \
limtydathistfeat=None, \
# model
# emission
## elements
## PSF
specfraceval=None, \
numbangl=1000, \
binsangltype='logt', \
numbsidepntsprob=100, \
listprefsbrtsbrt=None, \
listprefsbrtener=None, \
listprefsbrtlabltotl=None, \
lablgangunit=None, \
labllgal=None, \
lablbgal=None, \
lablfluxunit=None, \
lablflux=None, \
strgenerunit=None, \
indxenerfull=None, \
indxevttfull=None, \
binsenerfull=None, \
asymfluxprop=False, \
## Boolean flag to make the PSF model informed
boolpriopsfninfo=False, \
## spectral
# lensing
fittrelnpowr=0., \
# temp
margfactmodl=1., \
maxmgangdata=None, \
# proposals
stdvprophypr=0.01, \
stdvproppsfp=0.1, \
stdvpropbacp=0.01, \
stdvproplenp=1e-4, \
stdvlgal=0.001, \
stdvbgal=0.001, \
stdvflux=0.001, \
stdvspep=0.001, \
stdvspmrsind=0.2, \
varistdvlbhl=True, \
rtagmock=None, \
## transdimensional proposal probabilities
probtran=None, \
probspmr=None, \
# when proposing from the covariance, fracproprand should be very small!
fracproprand=0., \
# standard deviation of the Gaussian from which the angular splitting will be drawn for splits and merges
radispmr=None, \
defa=False, \
**args \
):
# preliminary setup
# construct the global object
gdat = tdpy.gdatstrt()
for attr, valu in locals().items():
if '__' not in attr and attr != 'gdat':
setattr(gdat, attr, valu)
# copy all provided inputs to the global object
for strg, valu in args.items():
setattr(gdat, strg, valu)
# PCAT folders
if gdat.pathpcat is None:
gdat.pathpcat = os.environ["PCAT_DATA_PATH"] + '/'
if gdat.pathpcat[-1] != '/':
gdat.pathpcat += '/'
gdat.pathdata = gdat.pathpcat + 'data/'
gdat.pathdataopti = gdat.pathdata + 'opti/'
gdat.pathimag = gdat.pathpcat + 'imag/'
gdat.pathoutp = gdat.pathdata + 'outp/'
gdat.pathinpt = gdat.pathdata + 'inpt/'
# list of parameter groups
gdat.liststrggroppara = ['genrbase', 'genrelem', 'derifixd', 'derielem', 'genrelemextd', 'derielemextd', 'kind', 'full']
# list of parameter features to be turned into lists
gdat.listfeatparalist = ['minm', 'maxm', 'fact', 'scal', 'lablroot', 'lablunit', 'stdv', 'labltotl', 'name']
# list of parameter features
gdat.listfeatpara = gdat.listfeatparalist + ['limt', 'bins', 'delt', 'numb', 'indx', 'cmap', 'mean', 'tick', 'numbbins', 'valutickmajr', 'labltickmajr', 'valutickminr', 'labltickminr']
# run tag
gdat.strgswep = '%d' % (gdat.numbswep)
## time stamp
gdat.strgtimestmp = tdpy.retr_strgtimestmp()
## name of the configuration function
if gdat.strgcnfg is None:
gdat.strgcnfg = inspect.stack()[1][3]
gdat.strgvers = 'v0.3'
if gdat.typeverb > 0:
print('PCAT %s started at %s.' % (gdat.strgvers, gdat.strgtimestmp))
print('Configuration %s' % gdat.strgcnfg)
# string describing the number of sweeps
gdat.strgnumbswep = '%d' % gdat.numbswep
# output paths
gdat.rtag = retr_rtag(gdat.strgcnfg, gdat.strgnumbswep)
gdat.pathoutprtag = retr_pathoutprtag(gdat.pathpcat, gdat.rtag)
# physical constants
gdat.prsccmtr = 3.086e18
gdat.ergsgevv = 624.151
gdat.factnewtlght = 2.09e13 # Msun / pc
gdat.listnamepdir = ['forw', 'reve']
gdat.listlablpdir = ['f', 'r']
# number of standard deviations around mean of Gaussian-distributed variables
gdat.numbstdvgaus = 4.
# start the timer
gdat.timerealtotl = time.time()
gdat.timeproctotl = time.clock()
# list of parameter types
## 'genr': generative parameters
## 'deri': derived parameters
gdat.liststrgtypepara = ['genr', 'deri']
booltemp = chec_statfile(gdat.pathpcat, gdat.rtag, 'gdatmodi')
if booltemp:
print('gdatmodi already exists. Skipping...')
else:
# create output folder for the run
os.system('mkdir -p %s' % gdat.pathoutprtag)
# write the list of arguments to file
fram = inspect.currentframe()
listargs, temp, temp, listargsvals = inspect.getargvalues(fram)
fileargs = open(gdat.pathoutprtag + 'cmndargs.txt', 'w')
fileargs.write('PCAT call arguments\n')
for args in listargs:
fileargs.write('%s = %s\n' % (args, listargsvals[args]))
fileargs.close()
# write the list of arguments to file
fileargs = open(gdat.pathoutprtag + 'args.txt', 'w')
fileargs.write('PCAT call arguments\n')
for args in listargs:
fileargs.write('%20s %s\n' % (args, listargsvals[args]))
fileargs.close()
# defaults
if gdat.typedata is None:
if gdat.strgexprsbrt is None:
gdat.typedata = 'mock'
else:
gdat.typedata = 'inpt'
print('gdat.typedata')
print(gdat.typedata)
# list of models
gdat.liststrgmodl = []
if gdat.typedata == 'mock':
gdat.liststrgmodl += ['true']
gdat.liststrgmodl += ['fitt']
gdat.refr = tdpy.gdatstrt()
gdat.listgmod = []
for strgmodl in gdat.liststrgmodl + ['refr']:
setattr(gdat, strgmodl, tdpy.gdatstrt())
gmod = getattr(gdat, strgmodl)
for strgstat in ['this', 'next']:
setattr(gmod, strgstat, tdpy.gdatstrt())
for strgfeatpara in gdat.listfeatpara:
setattr(gmod, strgfeatpara + 'para', tdpy.gdatstrt())
gdat.listgmod += [gmod]
for strgfeatpara in gdat.listfeatpara:
setattr(gdat, strgfeatpara + 'para', tdpy.gdatstrt())
## number of processes
gdat.strgproc = os.uname()[1]
if gdat.numbproc is None:
if gdat.strgproc == 'fink1.rc.fas.harvard.edu' or gdat.strgproc == 'fink2.rc.fas.harvard.edu' or gdat.strgproc == 'wise':
gdat.numbproc = 1
else:
gdat.numbproc = 1
if gdat.typedata == 'inpt' and gdat.rtagmock is not None:
print('Will use %s to account for selection effects.' % gdat.rtagmock)
gdat.pathoutprtagmock = retr_pathoutprtag(gdat.pathpcat, gdat.rtagmock)
## number of burned sweeps
if gdat.numbburn is None:
print('gdat.numbswep')
print(gdat.numbswep)
gdat.numbburn = int(gdat.numbswep / 10)
print('gdat.numbburn')
print(gdat.numbburn)
# burn-in
gdat.factburntmpr = 0.75
gdat.numbburntmpr = gdat.factburntmpr * gdat.numbburn
if (gdat.boolsqzeprop or gdat.boolexplprop) and gdat.typeopti == 'hess':
raise Exception('')
print('gdat.boolpriopsfninfo')
print(gdat.boolpriopsfninfo)
print('gdat.typeexpr')
print(gdat.typeexpr)
## factor by which to thin the sweeps to get samples
if gdat.factthin is not None and gdat.numbsamp is not None:
raise Exception('Both factthin and numbparagenrfull cannot be provided at the same time.')
elif gdat.factthin is None and gdat.numbsamp is None:
gdat.factthin = int(np.ceil(1e-3 * (gdat.numbswep - gdat.numbburn)))
gdat.numbsamp = int((gdat.numbswep - gdat.numbburn) / gdat.factthin)
elif gdat.numbsamp is not None:
gdat.factthin = int((gdat.numbswep - gdat.numbburn) / gdat.numbsamp)
elif gdat.factthin is not None:
gdat.numbsamp = int((gdat.numbswep - gdat.numbburn) / gdat.factthin)
if not isinstance(gdat.numbsamp, int) or not isinstance(gdat.factthin, int) or \
not isinstance(gdat.numbburn, int) or not isinstance(gdat.numbswep, int):
print('gdat.numbsamp')
print(gdat.numbsamp)
print('gdat.factthin')
print(gdat.factthin)
print('gdat.numbburn')
print(gdat.numbburn)
print('gdat.numbswep')
print(gdat.numbswep)
raise Exception('Number of samples is not an integer.')
# samples to be saved
gdat.indxsamp = np.arange(gdat.numbsamp)
# samples to be saved from all chains
gdat.numbsamptotl = gdat.numbsamp * gdat.numbproc
gdat.indxsamptotl = np.arange(gdat.numbsamptotl)
gdat.numbsweptotl = gdat.numbswep * gdat.numbproc
if gdat.typeverb > 0:
print('%d samples will be taken, discarding the first %d. The chain will be thinned by a factor of %d.' % \
(gdat.numbswep, gdat.numbburn, gdat.factthin))
print('The resulting chain will contain %d samples per chain and %d samples in total.' % (gdat.numbsamp, gdat.numbsamptotl))
if gdat.anlytype is None:
if gdat.typeexpr == 'chan':
gdat.anlytype = 'home'
elif gdat.typeexpr == 'ferm':
gdat.anlytype = 'rec8pnts'
else:
gdat.anlytype = 'nomi'
if gdat.priofactdoff is None:
gdat.priofactdoff = 1.
# experiment defaults
if gdat.typeexpr == 'ferm':
gdat.lablenerunit = 'GeV'
if gdat.typeexpr == 'chan':
gdat.lablenerunit = 'keV'
if gdat.typeexpr == 'gene':
gdat.lablenerunit = ''
if gdat.typeexpr == 'fire':
gdat.lablenerunit = '$\mu$m^{-1}'
if gdat.typeexpr == 'ferm':
if gdat.anlytype[4:8] == 'pnts':
bins = np.logspace(np.log10(0.3), np.log10(10.), 4)
if gdat.anlytype[4:8] == 'back':
bins = np.logspace(np.log10(0.3), np.log10(300.), 31)
if gdat.typeexpr == 'chan':
if gdat.anlytype.startswith('home'):
bins = np.array([0.5, 0.91, 1.66, 3.02, 5.49, 10.])
if gdat.anlytype.startswith('extr'):
bins = np.array([0.5, 2., 8.])
if gdat.anlytype.startswith('spec'):
bins = np.logspace(np.log10(0.5), np.log10(10.), 21)
if gdat.typeexpr == 'fire':
bins = np.logspace(np.log10(1. / 2.5e-6), np.log10(1. / 0.8e-6), 31)
if gdat.typeexpr == 'hubb':
# temp
#bins = np.array([500., 750, 1000.])
bins = np.array([750, 1000.])
if gdat.typeexpr != 'gene':
setp_varb(gdat, 'enerfull', bins=bins)
setp_varb(gdat, 'numbpixl', lablroot='$N_{pix}$')
if gdat.expo is not None:
setp_varb(gdat, 'expo', minm=np.amin(gdat.expo), maxm=np.amax(gdat.expo), lablroot='$\epsilon$', cmap='OrRd', scal='logt')
# energy band string
if gdat.strgenerfull is None:
if gdat.typeexpr == 'tess':
gdat.strgenerfull = ['T']
if gdat.typeexpr == 'sdss':
gdat.strgenerfull = ['z-band', 'i-band', 'r-band', 'g-band', 'u-band']
if gdat.typeexpr == 'hubb':
#gdat.strgenerfull = ['F606W', 'F814W']
gdat.strgenerfull = ['F814W']
if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire':
gdat.strgenerfull = []
for i in range(len(gdat.binspara.enerfull) - 1):
gdat.strgenerfull.append('%.3g %s - %.3g %s' % (gdat.binspara.enerfull[i], gdat.lablenerunit, gdat.binspara.enerfull[i+1], gdat.lablenerunit))
if gdat.typeexpr == 'gene':
gdat.strgenerfull = ['']
## PSF class
if gdat.indxevttfull is None:
if gdat.typeexpr == 'ferm':
gdat.indxevttfull = np.arange(2)
else:
gdat.indxevttfull = np.arange(1)
if gdat.indxevttincl is None:
if gdat.typeexpr == 'ferm':
gdat.indxevttincl = np.array([0, 1])
else:
gdat.indxevttincl = np.arange(1)
if gdat.indxevttincl is not None:
gdat.evttbins = True
else:
gdat.evttbins = False
if gdat.evttbins:
gdat.numbevtt = gdat.indxevttincl.size
gdat.numbevttfull = gdat.indxevttfull.size
else:
gdat.numbevtt = 1
gdat.numbevttfull = 1
gdat.indxevttincl = np.array([0])
gdat.indxevtt = np.arange(gdat.numbevtt)
# Boolean flag to indicate that the data are binned in energy
if gdat.typeexpr == 'gene':
gdat.boolbinsener = False
else:
gdat.boolbinsener = True
if gdat.boolbinsener:
gdat.numbenerfull = len(gdat.strgenerfull)
else:
gdat.numbenerfull = 1
gdat.indxenerfull = np.arange(gdat.numbenerfull)
if gdat.typepixl is None:
if gdat.typeexpr == 'ferm':
gdat.typepixl = 'heal'
else:
gdat.typepixl = 'cart'
if gdat.boolbinsener:
gdat.meanpara.enerfull = np.sqrt(gdat.binspara.enerfull[1:] * gdat.binspara.enerfull[:-1])
setp_varb(gdat, 'boolmodipsfn', valu=False, strgmodl='fitt')
# default values for model types
print('Starting to determine the default values for model types using setp_varbvalu()...')
if gdat.typeexpr == 'hubb':
typeemishost = 'sers'
else:
typeemishost = 'none'
setp_varb(gdat, 'typeemishost', valu=typeemishost)
setp_varb(gdat, 'lliktotl', lablroot='$L$')
### background type
#### template
if gdat.typeexpr == 'ferm':
if gdat.anlytype == 'bfun':
gdat.ordrexpa = 10
gdat.numbexpasing = gdat.ordrexpa**2
gdat.numbexpa = gdat.numbexpasing * 4
gdat.indxexpa = np.arange(gdat.numbexpa)
typeback = ['bfun%04d' % k for k in gdat.indxexpa]
else:
typeback = [1., 'sbrtfdfmsmthrec8pntsnorm.fits']
if gdat.typeexpr == 'chan':
# particle background
if gdat.anlytype.startswith('spec'):
# temp -- this is fake!
sbrtparttemp = np.array([70.04, 70.04, 12.12, 15.98, 10.79, 73.59, 73.59])
binsenerpart = np.logspace(np.log10(0.5), np.log10(10.), 6)
meanenerpart = np.sqrt(binsenerpart[:-1] * binsenerpart[1:])
meanenerparttemp = np.concatenate((np.array([0.5]), meanenerpart, np.array([10.])))
typebacktemp = interp(gdat.meanpara.enerfull, meanenerparttemp, sbrtparttemp)
if gdat.anlytype.startswith('home') :
typebacktemp = 1.
#typebacktemp = np.array([70.04, 12.12, 15.98, 10.79, 73.59]) / 70.04
if gdat.anlytype.startswith('extr'):
#typebacktemp = 'sbrtchanback' + gdat.anlytype + '.fits'
typebacktemp = 1.
if gdat.anlytype.startswith('spec'):
typeback = [[1e2, 2.], typebacktemp]
else:
typeback = [1., typebacktemp]
if gdat.typeexpr == 'hubb':
typeback = [1.]
if gdat.typeexpr == 'tess':
typeback = [1.]
if gdat.typeexpr == 'gene':
typeback = [1.]
if gdat.typeexpr == 'fire':
typeback = [1.]
if gdat.typeexpr != 'user':
setp_varb(gdat, 'typeback', valu=typeback)
if gdat.typeexpr == 'hubb':
numbsersfgrd = 1
else:
numbsersfgrd = 0
setp_varb(gdat, 'numbsersfgrd', valu=numbsersfgrd)
if gdat.typeexpr == 'gene':
typeelem = ['clus']
if gdat.typeexpr == 'ferm':
typeelem = ['lghtpnts']
if gdat.typeexpr == 'tess':
typeelem = ['lghtpnts']
if gdat.typeexpr == 'chan':
typeelem = ['lghtpnts']
if gdat.typeexpr == 'hubb':
typeelem = ['lghtpnts', 'lens', 'lghtgausbgrd']
if gdat.typeexpr == 'fire':
typeelem = ['lghtlineabso']
if gdat.typeexpr == 'user':
typeelem = ['user']
setp_varb(gdat, 'typeelem', valu=typeelem)
print('gdat.fitt.typeelem')
print(gdat.fitt.typeelem)
### PSF model
#### angular profile
if gdat.typeexpr == 'ferm':
typemodlpsfn = 'doubking'
if gdat.typeexpr == 'chan':
typemodlpsfn = 'singking'
if gdat.typeexpr == 'sdss':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'hubb':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'tess':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'gene':
typemodlpsfn = 'singgaus'
if gdat.typeexpr == 'fire':
typemodlpsfn = None
if gdat.typeexpr != 'user':
setp_varb(gdat, 'typemodlpsfn', valu=typemodlpsfn)
#### background names
listnameback = ['isot']
if gdat.typeexpr == 'ferm':
listnameback.append('fdfm')
#if gdat.typeexpr == 'chan':
# listnameback.append('part')
setp_varb(gdat, 'listnameback', valu=listnameback)
if gdat.strgpdfn == 'prio':
gdat.lablsampdist = 'Prior'
if gdat.strgpdfn == 'post':
gdat.lablsampdist = 'Posterior'
for strgmodl in gdat.liststrgmodl:
# set up the indices of the model
setp_indxpara(gdat, 'init', strgmodl=strgmodl)
if gdat.numbswepplot is None:
gdat.numbswepplot = 50000
gdat.numbplotfram = gdat.numbswep / gdat.numbswepplot
#setp_varb(gdat, 'colr', valu='mediumseagreen', strgmodl='refr')
setp_varb(gdat, 'colr', valu='b', strgmodl='fitt')
if gdat.typedata == 'mock':
setp_varb(gdat, 'colr', valu='g', strgmodl='true')
#gdat.refr.colr = 'mediumseagreen'
#gdat.fitt.colr = 'deepskyblue'
gdat.minmmass = 1.
gdat.maxmmass = 10.
if gdat.checprio:
gdat.liststrgpdfn = ['prio', 'post']
else:
gdat.liststrgpdfn = ['post']
gdat.lablmass = 'M'
gdat.minmmassshel = 1e1
gdat.maxmmassshel = 1e5
gdat.lablmassshel = '$M_r$'
gdat.lablcurv = r'\kappa'
gdat.lablexpc = r'E_{c}'
gmod.scalcurvplot = 'self'
gmod.scalexpcplot = 'self'
#gdat.minmper0 = 1e-3
#gdat.maxmper0 = 1e1
#
#gdat.minmmagf = 10**7.5
#gdat.maxmmagf = 10**16
# temp -- automatize this eventually
#gmod.minmper0 = gdat.minmper0
#gmod.minmper0 = gdat.minmper0
#gmod.maxmper0 = gdat.maxmper0
#gmod.maxmper0 = gdat.maxmper0
#gmod.minmmagf = gdat.minmmagf
#gmod.minmmagf = gdat.minmmagf
#gmod.maxmmagf = gdat.maxmmagf
#gmod.maxmmagf = gdat.maxmmagf
gdat.fitt.listelemmrkr = ['+', '_', '3']
gdat.true.listmrkrhits = ['x', '|', '4']
gdat.true.listmrkrmiss = ['s', 'o', 'p']
gdat.true.listlablmiss = ['s', 'o', 'p']
# list of scalings
gdat.listscaltype = ['self', 'logt', 'atan', 'gaus', 'pois', 'expo']
# number of grids
gdat.numbgrid = 1
gdat.indxgrid = np.arange(gdat.numbgrid)
if gdat.typepixl == 'heal' and gdat.boolforccart:
raise Exception('Cartesian forcing can only used with cart typepixl')
gdat.liststrgphas = ['fram', 'finl', 'anim']
gdat.liststrgelemtdimtype = ['bind']
# lensing
## list of strings indicating different methods of calculating the subhalo mass fraction
gdat.liststrgcalcmasssubh = ['delt', 'intg']
# input data
if gdat.typedata == 'inpt':
path = gdat.pathinpt + gdat.strgexprsbrt
gdat.sbrtdata = astropy.io.fits.getdata(path)
if gdat.typepixl == 'heal' or gdat.typepixl == 'cart' and gdat.boolforccart:
if gdat.sbrtdata.ndim != 3:
raise Exception('exprsbrtdata should be a 3D numpy np.array if pixelization is HealPix.')
else:
if gdat.sbrtdata.ndim != 4:
raise Exception('exprsbrtdata should be a 4D numpy np.array if pixelization is Cartesian.')
if gdat.typepixl == 'cart' and not gdat.boolforccart:
gdat.sbrtdata = gdat.sbrtdata.reshape((gdat.sbrtdata.shape[0], -1, gdat.sbrtdata.shape[3]))
gdat.numbenerfull = gdat.sbrtdata.shape[0]
if gdat.typepixl == 'heal':
gdat.numbpixlfull = gdat.sbrtdata.shape[1]
elif gdat.boolforccart:
gdat.numbpixlfull = gdat.numbsidecart**2
else:
gdat.numbpixlfull = gdat.sbrtdata.shape[1] * gdat.sbrtdata.shape[2]
gdat.numbevttfull = gdat.sbrtdata.shape[2]
if gdat.typepixl == 'heal':
# temp
gdat.numbsidecart = 100
gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)
gdat.numbsideheal = int(np.sqrt(gdat.numbpixlfull / 12))
if gdat.typeexpr == 'hubb':
gdat.hubbexpofact = 1.63050e-19
if gdat.strgexpo is None:
if gdat.typeexpr == 'ferm':
gdat.strgexpo = 'expofermrec8pntsigal0256.fits'
if gdat.typeexpo is None:
if gdat.typeexpr == 'ferm':
gdat.typeexpo = 'file'
else:
gdat.typeexpo = 'cons'
print('strgexpo')
print(strgexpo)
## generative model
# the factor to convert radians (i.e., internal angular unit of PCAT) to the angular unit that will be used in the output (i.e., plots and tables)
if gdat.anglfact is None:
if gdat.typeexpr == 'ferm':
gdat.anglfact = 180. / np.pi
if gdat.typeexpr == 'tess':
gdat.anglfact = 60 * 180. / np.pi
if gdat.typeexpr == 'sdss' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'hubb':
gdat.anglfact = 3600 * 180. / np.pi
if gdat.typeexpr == 'sche' or gdat.typeexpr == 'gene':
gdat.anglfact = 1.
if gdat.numbsidecart is not None and gdat.typepixl == 'cart' and not gdat.boolforccart and isinstance(strgexpo, str):
raise Exception('numbsidecart argument should not be provided when strgexpo is a file name and pixelization is Cartesian.')
if gdat.typepixl == 'heal' or gdat.typepixl == 'cart' and gdat.boolforccart:
if gdat.numbsidecart is None:
gdat.numbsidecart = 100
# exposure
gdat.boolcorrexpo = gdat.expo is not None
if gdat.typeexpo == 'cons':
if gdat.typedata == 'mock':
if gdat.numbsidecart is None:
gdat.numbsidecart = 100
if gdat.typedata == 'mock':
if gdat.typepixl == 'heal':
gdat.expo = np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gdat.typepixl == 'cart':
gdat.expo = np.ones((gdat.numbenerfull, gdat.numbsidecart**2, gdat.numbevttfull))
if gdat.typedata == 'inpt':
gdat.expo = np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gdat.typeexpo == 'file':
path = gdat.pathinpt + gdat.strgexpo
if gdat.typeverb > 0:
print('Reading %s...' % path)
gdat.expo = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
gdat.expo = gdat.expo.reshape((gdat.expo.shape[0], -1, gdat.expo.shape[-1]))
if gdat.numbsidecart is None:
# temp -- gdat.numbsidecart takes the value of the region 0
if np.sqrt(gdat.expo.shape[1]) % 1. != 0.:
raise Exception('')
gdat.numbsidecart = int(np.sqrt(gdat.expo.shape[1]))
if gdat.typedata == 'mock':
if gdat.typepixl == 'cart':
gdat.numbpixlfull = gdat.numbsidecart**2
if gdat.typepixl == 'heal':
gdat.numbpixlfull = 12 * gdat.numbsideheal**2
# initialization type
if gdat.inittype is None:
gdat.inittype = 'rand'
if gdat.typeexpr != 'user':
# Boolean flag to indicate binning in space
gdat.boolbinsspat = gdat.numbpixlfull != 1
print('gdat.boolbinsspat')
print(gdat.boolbinsspat)
if gdat.boolcorrexpo and np.amin(gdat.expo) == np.amax(gdat.expo) and not isinstance(gdat.strgexpo, float):
raise Exception('Bad input exposure map.')
if gdat.boolbinsspat:
if gdat.typepixl == 'cart' and isinstance(gdat.strgexpo, float) and gdat.typedata == 'inpt':
if np.sqrt(gdat.sbrtdata.shape[1]) % 1. != 0.:
raise Exception('')
gdat.numbsidecart = int(np.sqrt(gdat.sbrtdata.shape[1]))
gdat.numbsidecarthalf = int(gdat.numbsidecart / 2)
if gdat.typepixl == 'cart':
gdat.numbpixlcart = gdat.numbsidecart**2
### spatial extent of the data
if gdat.maxmgangdata is None:
if gdat.typeexpr == 'chan':
gdat.maxmgangdata = 0.492 / gdat.anglfact * gdat.numbsidecarthalf
if gdat.typeexpr == 'ferm':
gdat.maxmgangdata = 15. / gdat.anglfact
if gdat.typeexpr == 'tess':
gdat.maxmgangdata = 20. / gdat.anglfact
if gdat.typeexpr == 'hubb':
gdat.maxmgangdata = 2. / gdat.anglfact
if gdat.typeexpr == 'gene':
gdat.maxmgangdata = 1. / gdat.anglfact
print('gdat.numbsidecart')
print(gdat.numbsidecart)
print('gdat.maxmgangdata')
print(gdat.maxmgangdata)
# pixelization
if gdat.typepixl == 'cart':
gdat.apix = (2. * gdat.maxmgangdata / gdat.numbsidecart)**2
if gdat.typepixl == 'heal':
temp, temp, temp, gdat.apix = tdpy.retr_healgrid(gdat.numbsideheal)
gdat.sizepixl = np.sqrt(gdat.apix)
# factor by which to multiply the y axis limits of the surface brightness plot
if gdat.numbpixlfull == 1:
gdat.factylimtbrt = [1e-4, 1e7]
else:
gdat.factylimtbrt = [1e-4, 1e3]
# grid
gdat.minmlgaldata = -gdat.maxmgangdata
gdat.maxmlgaldata = gdat.maxmgangdata
gdat.minmbgaldata = -gdat.maxmgangdata
gdat.maxmbgaldata = gdat.maxmgangdata
if gdat.typepixl == 'cart' and gdat.boolforccart:
if gdat.typedata == 'inpt':
sbrtdatatemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtdatatemp[i, :, m] = tdpy.retr_cart(gdat.sbrtdata[i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gdat.sbrtdata = sbrtdatatemp
if gdat.boolcorrexpo:
expotemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
expotemp[i, :, m] = tdpy.retr_cart(gdat.expo[i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gdat.expo = expotemp
gdat.sdenunit = 'degr'
gdat.factergskevv = 1.6e-9
if gdat.typeexpr == 'ferm':
gdat.listspecconvunit = [['en02', 'gevv']]
if gdat.typeexpr == 'chan':
gdat.listspecconvunit = [['en00', 'kevv'], ['en02', 'kevv'], ['en02', 'ergs'], ['en03', 'ergs', '0520', 0.5, 2.], \
['en03', 'ergs', '0210', 2., 10.], \
['en03', 'ergs', '0510', 0.5, 10.], \
['en03', 'ergs', '0208', 2., 8.], \
['en03', 'ergs', '0508', 0.5, 8.], \
['en03', 'ergs', '0207', 2., 7.], \
['en03', 'ergs', '0507', 0.5, 7.]]
if gdat.typeexpr == 'hubb':
gdat.listspecconvunit = [['en03', 'ergs']]
if gdat.typeexpr == 'fire':
gdat.listspecconvunit = [['en00', 'imum']]
# temp
#if gdat.typeexpr == 'chan' and (gdat.anlytype.startswith('home') or gdat.anlytype.startswith('extr')):
# gmod.lablpopl = ['AGN', 'Galaxy']
if gdat.typeexpr == 'ferm' or gdat.typeexpr == 'chan' or gdat.typeexpr == 'fire':
gdat.enerdiff = True
if gdat.typeexpr == 'hubb' or gdat.typeexpr == 'gene' or gdat.typeexpr == 'tess':
gdat.enerdiff = False
if gdat.indxenerincl is None:
# default
if gdat.boolbinsener:
gdat.indxenerincl = np.arange(gdat.binspara.enerfull.size - 1)
if gdat.typeexpr == 'ferm':
if gdat.anlytype[4:8] == 'pnts':
gdat.indxenerincl = np.arange(3)
if gdat.anlytype[4:8] == 'back':
gdat.indxenerincl = np.arange(30)
if gdat.typeexpr == 'chan':
if gdat.anlytype.startswith('home'):
gdat.indxenerincl = np.arange(5)
if gdat.anlytype.startswith('extr'):
gdat.indxenerincl = np.arange(2)
if gdat.typeexpr == 'hubb':
gdat.indxenerincl = np.array([0])
#gdat.indxenerincl = np.array([1])
#gdat.indxenerincl = np.array([0, 1])
if gdat.typeexpr == 'gene':
gdat.indxenerincl = np.array([0])
if gdat.indxenerincl is None:
gdat.numbener = 1
else:
gdat.numbener = gdat.indxenerincl.size
gdat.indxener = np.arange(gdat.numbener, dtype=int)
if gdat.indxenerincl is None:
gdat.indxenerincl = gdat.indxener
if gdat.boolbinsener:
gdat.indxenerinclbins = np.empty(gdat.numbener+1, dtype=int)
gdat.indxenerinclbins[0:-1] = gdat.indxenerincl
gdat.indxenerinclbins[-1] = gdat.indxenerincl[-1] + 1
gdat.indxenerpivt = 0
gdat.numbenerplot = 100
gdat.strgener = [gdat.strgenerfull[k] for k in gdat.indxenerincl]
gdat.binspara.ener = gdat.binspara.enerfull[gdat.indxenerinclbins]
gdat.meanpara.ener = np.sqrt(gdat.binspara.ener[1:] * gdat.binspara.ener[:-1])
gdat.deltener = gdat.binspara.ener[1:] - gdat.binspara.ener[:-1]
gdat.minmener = gdat.binspara.ener[0]
gdat.maxmener = gdat.binspara.ener[-1]
retr_axis(gdat, 'ener')
gdat.limtener = [np.amin(gdat.binspara.ener), np.amax(gdat.binspara.ener)]
if gdat.boolbinsener:
if gdat.numbener > 1:
gdat.enerpivt = gdat.meanpara.ener[gdat.indxenerpivt]
# energy bin indices other than that of the pivot bin
gdat.indxenerinde = np.setdiff1d(gdat.indxener, gdat.indxenerpivt)
# temp
if gdat.typeexpr == 'chan':
gdat.edis = 0.3 * | np.sqrt(gdat.binspara.ener) | numpy.sqrt |
# Copyright (c) 2019, <NAME>
import unittest
import unittest.mock as mock
import numpy as np
import transforms3d as tf3d
import pymodm
from pymodm.errors import ValidationError
from arvet.util.transform import Transform
from arvet.util.test_helpers import ExtendedTestCase
import arvet.database.tests.database_connection as dbconn
import arvet.database.image_manager as im_manager
from arvet.metadata.image_metadata import make_metadata, ImageSourceType
from arvet.core.image import Image
import arvet.core.tests.mock_types as mock_types
from arvet_slam.trials.slam.tracking_state import TrackingState
from arvet_slam.trials.slam.visual_slam import SLAMTrialResult, FrameResult
# ------------------------- FRAME RESULT -------------------------
class TestFrameResultMongoModel(pymodm.MongoModel):
    """Minimal wrapper model used only to exercise database (de)serialisation
    of FrameResult as an embedded document in the tests below."""
    frame_result = pymodm.fields.EmbeddedDocumentField(FrameResult)
class TestPoseErrorDatabase(unittest.TestCase):
    """Round-trip database tests for FrameResult embedded in a wrapper model."""

    # Shared Image fixture, created once for the whole class in setUpClass
    image = None

    @classmethod
    def setUpClass(cls):
        dbconn.connect_to_test_db()
        dbconn.setup_image_manager()
        pixels = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        cls.image = Image(
            pixels=pixels, image_group='test',
            metadata=make_metadata(pixels, source_type=ImageSourceType.SYNTHETIC)
        )
        cls.image.save()

    @classmethod
    def tearDownClass(cls):
        # Clean up after ourselves by dropping the collection for this model
        TestFrameResultMongoModel._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()

    def _assert_round_trips(self, frame_result):
        """Save ``frame_result`` in a wrapper model, reload it, compare, clean up."""
        model = TestFrameResultMongoModel()
        model.frame_result = frame_result
        model.save()
        # Load all the entities
        all_entities = list(TestFrameResultMongoModel.objects.all())
        self.assertGreaterEqual(len(all_entities), 1)
        loaded = all_entities[0].frame_result
        # Compare these fields individually first for clearer failure messages
        self.assertEqual(loaded.image, frame_result.image)
        self.assertEqual(loaded.tracking_state, frame_result.tracking_state)
        self.assertEqual(loaded.pose, frame_result.pose)
        self.assertEqual(loaded.motion, frame_result.motion)
        self.assertEqual(loaded.estimated_pose, frame_result.estimated_pose)
        self.assertEqual(loaded.estimated_motion, frame_result.estimated_motion)
        self.assertEqual(loaded, frame_result)
        all_entities[0].delete()

    def _assert_save_fails(self, frame_result):
        """Saving ``frame_result`` must raise a ValidationError."""
        model = TestFrameResultMongoModel()
        model.frame_result = frame_result
        with self.assertRaises(ValidationError):
            model.save()

    def test_stores_and_loads(self):
        # Fully-populated frame result, every optional field set
        self._assert_round_trips(FrameResult(
            timestamp=10.3,
            image=self.image,
            processing_time=12.44,
            pose=Transform((1, 2, 3), (4, 5, 6, 7)),
            motion=Transform((-2, 1, -3), (-3, 5, 6, -6)),
            estimated_pose=Transform((-1, -2, -3), (-4, -5, -6, 7)),
            estimated_motion=Transform((2, -1, 3), (8, -5, -6, -6)),
            tracking_state=TrackingState.NOT_INITIALIZED,
            loop_edges=[6.223],
            num_features=53,
            num_matches=6
        ))

    def test_stores_and_loads_minimal(self):
        # Only the required fields; optional estimates stay at their defaults
        self._assert_round_trips(FrameResult(
            timestamp=10.3,
            image=self.image,
            processing_time=12.44,
            pose=Transform((1, 2, 3), (4, 5, 6, 7)),
            motion=Transform((-2, 1, -3), (-3, 5, 6, -6))
        ))

    def test_stores_and_loads_with_explicitly_null_estimates(self):
        # Explicit None estimates must survive the round trip as None
        self._assert_round_trips(FrameResult(
            timestamp=10.3,
            image=self.image,
            processing_time=12.44,
            pose=Transform((1, 2, 3), (4, 5, 6, 7)),
            motion=Transform((-2, 1, -3), (-3, 5, 6, -6)),
            estimated_pose=None,
            estimated_motion=None
        ))

    def test_required_fields_are_required(self):
        required_kwargs = {
            'timestamp': 10.3,
            'image': self.image,
            'processing_time': 12.44,
            'pose': Transform((1, 2, 3), (4, 5, 6, 7)),
            'motion': Transform((-2, 1, -3), (-3, 5, 6, -6)),
        }
        # Omitting any single required field must make save() fail validation
        for missing in required_kwargs:
            kwargs = {key: value for key, value in required_kwargs.items() if key != missing}
            self._assert_save_fails(FrameResult(**kwargs))
# ------------------------- SLAM TRIAL RESULT -------------------------
class TestSLAMTrialResult(ExtendedTestCase):
    def test_sorts_results_by_timestamp(self):
        """Results given to SLAMTrialResult out of order must come back sorted by timestamp."""
        system = mock_types.MockSystem()
        image_source = mock_types.MockImageSource()
        mock_image = mock.create_autospec(Image)
        # NOTE(review): `-1 ** idx` parses as `-(1 ** idx)` == -1, so these timestamps
        # are decreasing (plus jitter) rather than alternating in sign; `(-1) ** idx`
        # was presumably intended -- confirm. The unsorted order still exercises sorting.
        timestamps = [(-1 ** idx) * idx + np.random.normal(0, 0.01) for idx in range(10)]
        # One fully-populated frame result per timestamp, with randomised poses/motions
        results = [
            FrameResult(
                timestamp=timestamp,
                image=mock_image,
                processing_time=np.random.uniform(0.01, 1),
                pose=Transform(
                    (idx * 15 + np.random.normal(0, 1), idx + np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), idx * np.pi / 6), w_first=True
                ),
                motion=Transform(
                    (np.random.normal(0, 1), np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), np.pi / 6), w_first=True
                ),
                estimated_pose=Transform(
                    (idx * 15 + np.random.normal(0, 1), idx + np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), 9 * idx * np.pi / 36), w_first=True
                ),
                estimated_motion=Transform(
                    (np.random.normal(0, 1), np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), idx * np.pi / 36), w_first=True
                ),
                tracking_state=TrackingState.OK,
                # loop edges reference existing timestamps, so construction succeeds
                loop_edges=[np.random.choice(timestamps)],
                num_features=np.random.randint(10, 1000),
                num_matches=np.random.randint(10, 1000)
            )
            for idx, timestamp in enumerate(timestamps)
        ]
        obj = SLAMTrialResult(
            system=system,
            image_source=image_source,
            success=True,
            settings={'key': 'value'},
            results=results,
            has_scale=True
        )
        # After construction, stored results must be in strictly increasing timestamp order
        for idx in range(1, len(results)):
            self.assertGreater(obj.results[idx].timestamp, obj.results[idx - 1].timestamp)
    def test_throws_exception_if_loop_edges_refer_to_missing_timestamp(self):
        """Constructing a SLAMTrialResult with a loop edge pointing at a timestamp
        that no frame has must raise ValueError."""
        system = mock_types.MockSystem()
        image_source = mock_types.MockImageSource()
        mock_image = mock.create_autospec(Image)
        # NOTE(review): `-1 ** idx` is always -1 (precedence); see the sorting test above.
        timestamps = [(-1 ** idx) * idx + np.random.normal(0, 0.01) for idx in range(10)]
        # Valid frame results whose loop edges all reference existing timestamps
        results = [
            FrameResult(
                timestamp=timestamp,
                image=mock_image,
                processing_time=np.random.uniform(0.01, 1),
                pose=Transform(
                    (idx * 15 + np.random.normal(0, 1), idx + np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), idx * np.pi / 6), w_first=True
                ),
                motion=Transform(
                    (np.random.normal(0, 1), np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), np.pi / 6), w_first=True
                ),
                estimated_pose=Transform(
                    (idx * 15 + np.random.normal(0, 1), idx + np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), 9 * idx * np.pi / 36), w_first=True
                ),
                estimated_motion=Transform(
                    (np.random.normal(0, 1), np.random.normal(0, 0.1), np.random.normal(0, 1)),
                    tf3d.quaternions.axangle2quat((1, 2, 3), idx * np.pi / 36), w_first=True
                ),
                tracking_state=TrackingState.OK,
                loop_edges=[np.random.choice(timestamps)],
                num_features=np.random.randint(10, 1000),
                num_matches=np.random.randint(10, 1000)
            )
            for idx, timestamp in enumerate(timestamps)
        ]
        # Corrupt one frame with a loop edge past every real timestamp
        results[3].loop_edges.append(max(timestamps) + 0.23)
        with self.assertRaises(ValueError):
            SLAMTrialResult(
                system=system,
                image_source=image_source,
                success=True,
                settings={'key': 'value'},
                results=results,
                has_scale=True
            )
def test_infers_motion_from_pose(self):
system = mock_types.MockSystem()
image_source = mock_types.MockImageSource()
mock_image = mock.create_autospec(Image)
timestamps = [idx + np.random.normal(0, 0.01) for idx in range(10)]
results = [
FrameResult(
timestamp=timestamp,
image=mock_image,
processing_time=np.random.uniform(0.01, 1),
pose=Transform(
(idx * 15 + np.random.normal(0, 1), idx + np.random.normal(0, 0.1), np.random.normal(0, 1)),
tf3d.quaternions.axangle2quat((1, 2, 3), idx * np.pi / 36), w_first=True
),
tracking_state=TrackingState.OK,
loop_edges=[np.random.choice(timestamps)],
num_features=np.random.randint(10, 1000),
num_matches= | np.random.randint(10, 1000) | numpy.random.randint |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from augmixations.core import (
generate_rect_coordinates,
insert_image_in_background,
check_middle_part_overlap_critical,
correct_box_if_full_side_overlap,
correct_box_if_some_alnge_overlap,
correct_background_boxes,
)
@pytest.mark.parametrize('params',
[(500, 500, 0, 0, 500, 500, 100, 100, 300, 300),
(1000, 2000, 0, 0, 2000, 1000, 300, 300, 600, 600),
(500, 1000, 0, 0, 500, 500, None, None, None, None),
(500, 700, None, None, None, None, None, None, None, None),
(300, 300, None, None, None, None, 100, 100, 300, 300)])
def test_generate_rect_coordinates(params):
    """The generated rectangle must be well-formed and lie inside the image."""
    img_h, img_w = params[0], params[1]
    x1, y1, x2, y2 = generate_rect_coordinates(*params)
    # corners must be ordered
    assert x1 < x2
    assert y1 < y2
    # rectangle must stay within the image bounds
    assert x1 >= 0
    assert y1 >= 0
    assert x2 <= img_w
    assert y2 <= img_h
@pytest.mark.parametrize('params',
[(np.ones((500, 500, 3), dtype=np.uint8)*255,
np.ones((300, 300, 3), dtype=np.uint8)*255,
100, 100, 0, 0, 500, 500),
(np.ones((500, 700, 3), dtype=np.uint8)*255,
np.ones((300, 200, 3), dtype=np.uint8)*255,
100, 100, None, None, None, None), ])
def test_insert_image_in_background(params):
    """The shifted insertion point must respect any given bounds; shape is preserved."""
    bg_img, fg_img, start_x, start_y, min_x, min_y, max_x, max_y = params
    out_img, (shift_x, shift_y) = insert_image_in_background(
        bg_img, fg_img, start_x, start_y, min_x, min_y, max_x, max_y)
    final_x = start_x + shift_x
    final_y = start_y + shift_y
    # each bound is optional; only check the ones that were supplied
    for lower, upper, value in ((min_x, max_x, final_x), (min_y, max_y, final_y)):
        if upper is not None:
            assert value <= upper
        if lower is not None:
            assert value >= lower
    # the composited image keeps the background's shape
    assert out_img.shape == bg_img.shape
@pytest.mark.parametrize('params', [
({'x1': 75, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 75, 'x2': 125, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 75, 'y1': 75, 'x2': 125, 'y2': 175, 'area': 50*100, 'height': 100, 'width': 50},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
({'x1': 75, 'y1': 75, 'x2': 175, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.4, 0.9, 0.9, np.array([50, 50, 150, 150]), False),
({'x1': 75, 'y1': 75, 'x2': 175, 'y2': 125, 'area': 50*100, 'height': 50, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.3, 0.9, 0.9, np.array([50, 50, 150, 150]), True),
    # Case: the box is adjusted
    # when most of one of its sides is overlapped
({'x1': 75, 'y1': 25, 'x2': 225, 'y2': 100, 'area': 75*150, 'height': 75, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.9, 0.6, np.array([50, 100, 250, 250]), False),
({'x1': 75, 'y1': 200, 'x2': 225, 'y2': 300, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.9, 0.6, np.array([50, 50, 250, 200]), False),
({'x1': 25, 'y1': 75, 'x2': 100, 'y2': 225, 'area': 75*150, 'height': 150, 'width': 75},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.6, 0.9, np.array([100, 50, 250, 250]), False),
({'x1': 200, 'y1': 75, 'x2': 300, 'y2': 225, 'area': 100*150, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.9, 0.6, 0.9, np.array([50, 50, 200, 250]), False),
    # Inserted image centred inside the box, but very large
({'x1': 55, 'y1': 55, 'x2': 245, 'y2': 245, 'area': 190*190, 'height': 190, 'width': 190},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.5, 0.9, 0.9, np.array([50, 50, 250, 250]), True),
    # Inserted image fully overlapping the box vertically through the centre
({'x1': 55, 'y1': 0, 'x2': 245, 'y2': 300, 'area': 190*300, 'height': 300, 'width': 190},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.5, 0.9, 0.9, np.array([50, 50, 250, 250]), True),
    # Inserted image fully overlapping the box horizontally through the centre
({'x1': 0, 'y1': 55, 'x2': 300, 'y2': 245, 'area': 190*300, 'height': 190, 'width': 300},
{'x1': 50, 'y1': 50, 'x2': 250, 'y2': 250, 'area': 200*200, 'height': 200, 'width': 200},
0.5, 0.9, 0.9, np.array([50, 50, 250, 250]), True),
])
def test_check_middle_part_overlap_critical(params):
    """check_middle_part_overlap_critical must return the expected box and overlap flag."""
    (rect_info, box_info, max_area_ratio,
     max_h_overlap, max_w_overlap, expected_box, expected_flag) = params
    result_box, result_flag = check_middle_part_overlap_critical(
        rect_info,
        box_info,
        max_area_ratio,
        max_h_overlap,
        max_w_overlap,
        debug=True,
        label='Test box')
    assert result_flag == expected_flag
    assert np.array_equal(expected_box, result_box)
@pytest.mark.parametrize('params', [
    # One of the sides is overlapped completely;
    # the overlapped-area ratio is 0.75 for every case
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 175, 'area': 100*150, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([125, 50, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 175, 'area': 100*150, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([125, 50, 150, 150]), True),
({'x1': 25, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([50, 125, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 175, 'y2': 125, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([50, 125, 150, 150]), True),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([50, 50, 75, 150]), False),
({'x1': 75, 'y1': 25, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 150, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([50, 50, 75, 150]), True),
({'x1': 25, 'y1': 75, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.8, np.array([50, 50, 150, 75]), False),
({'x1': 25, 'y1': 75, 'x2': 175, 'y2': 175, 'area': 150*100, 'height': 100, 'width': 150},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, np.array([50, 50, 150, 75]), True),
])
def test_correct_box_if_full_side_overlap(params):
    """correct_box_if_full_side_overlap must return the expected box and overlap flag."""
    rect_info, box_info, max_area_ratio, expected_box, expected_flag = params
    result_box, result_flag = correct_box_if_full_side_overlap(
        rect_info,
        box_info,
        max_area_ratio,
        debug=True,
        label='Test box')
    assert result_flag == expected_flag
    assert np.array_equal(expected_box, result_box)
@pytest.mark.parametrize('params', [
    # Test: one of the corners is overlapped; checked in turn:
    # 1. Ordinary overlap that passes all thresholds
    # 2. Lowered overlap-area threshold, min ratio = 0.5625
    # 3. Raised minimum-width threshold, min ratio = 0.25
    # 4. Raised minimum-height threshold, min ratio = 0.25
    # 5. Raised min width/height thresholds and lowered min area threshold
    # TOP-LEFT CORNER
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.7, np.array([50, 50, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.9, 0.5, np.array([50, 50, 150, 150]), True),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.7, 0.9, 0.7, np.array([125, 50, 150, 150]), False),
({'x1': 25, 'y1': 25, 'x2': 125, 'y2': 125, 'area': 100*100, 'height': 100, 'width': 100},
{'x1': 50, 'y1': 50, 'x2': 150, 'y2': 150, 'area': 100*100, 'height': 100, 'width': 100},
0.9, 0.7, 0.7, | np.array([50, 125, 150, 150]) | numpy.array |
"""
@author: <NAME> (andreww(at)email(dot)sc(dot)edu)
Script used to generate optimization-based whitebox attacks and test them
against
"""
import argparse
import numpy as np
import pandas as pd
import os
import time
from matplotlib import pyplot as plt
import scipy.io
from utils.model import load_pool, load_lenet
from models.athena import Ensemble, ENSEMBLE_STRATEGY
from utils.file import load_from_json
from utils.metrics import error_rate, get_corrections
from attacks.attack import generate
def generate_whitebox_ae(model, data, labels, attack_configs,
                         eot=False,
                         save=False, output_dir=None):
    """
    Generate whitebox adversarial examples using the optimization approach.
    :param model: The targeted model. For a whitebox attack, this should be
            the full defended model.
    :param data: array. The benign samples to generate adversarial for.
    :param labels: array or list. The true labels (class ids or one-hot rows).
    :param attack_configs: dictionary. Attacks and corresponding settings;
            must contain ``num_attacks`` and one ``configs<i>`` entry per attack.
    :param eot: boolean. Whether to enable Expectation-Over-Transformation.
    :param save: boolean. True, if save the adversarial examples.
    :param output_dir: str or path. Location to save the adversarial examples.
            It cannot be None when save is True.
    :return: None. Results are plotted and optionally saved under ``output_dir``.
    """
    img_rows, img_cols = data.shape[1], data.shape[2]
    # Fix: `num_images` was referenced below without ever being defined in this
    # function (NameError at runtime); it is the number of benign samples.
    num_images = data.shape[0]
    num_attacks = attack_configs.get("num_attacks")
    data_loader = (data, labels)
    if len(labels.shape) > 1:
        labels = [np.argmax(p) for p in labels]  # one-hot -> class ids
    # initialize array for storing predicted values for each attack.
    # Each row corresponds to an image; column 0 holds the true labels and each
    # subsequent column holds one attack's predictions. Initialized with -1 so
    # any cell never overwritten with a digit 0-9 is identifiable as erroneous.
    dataTable = -np.ones((num_images, num_attacks + 1), dtype=int)
    dataTable[:, 0] = labels
    for attack_id in range(num_attacks):  # renamed from `id` (shadowed builtin)
        key = "configs{}".format(attack_id)
        attack_args = attack_configs.get(key)
        attack_args["eot"] = eot
        data_adv = generate(model=model,
                            data_loader=data_loader,
                            attack_args=attack_args
                            )
        # predict the adversarial examples
        predictions = model.predict(data_adv)
        predictions = [np.argmax(p) for p in predictions]
        err_rate = error_rate(np.asarray(predictions), np.asarray(labels))
        print('>>>Error Rate: ', err_rate)
        dataTable[:, attack_id + 1] = predictions  # insert predictions as a new column
        # plot a couple of examples for visual inspection
        num_plotting = min(data.shape[0], 2)
        for i in range(num_plotting):
            img = data_adv[i].reshape((img_rows, img_cols))
            plt.imshow(img, cmap='gray')
            title = '{}: {}->{}'.format(attack_configs.get(key).get("description"),
                                        labels[i],
                                        predictions[i])
            plt.title(title)
            plt.show()
            plt.close()
        # save the adversarial examples
        if save:
            if output_dir is None:
                raise ValueError("Cannot save images to a none path.")
            # file name derived from the attack's description
            # NOTE(review): name says "EOToff" regardless of the `eot` flag -- confirm intended
            file = os.path.join(output_dir, "ae_whitebox_{}_EOToff.npy".format(attack_configs.get(key).get("description")))
            print("Saving the adversarial examples to file [{}].".format(file))
            np.save(file, data_adv)
    if save:
        file = os.path.join(output_dir, "dataTable2.mat")
        print("Saving dataTable to " + file)
        scipy.io.savemat(file, {'dataTable2': dataTable})
def evaluate_baseline_attacks(trans_configs, model_configs,
data_configs, num_images, save=False, output_dir=None):
"""
Apply transformation(s) on images.
:param trans_configs: dictionary. The collection of the parameterized transformations to test.
in the form of
{ configsx: {
param: value,
}
}
The key of a configuration is 'configs'x, where 'x' is the id of corresponding weak defense.
:param model_configs: dictionary. Defines model related information.
Such as, location, the undefended model, the file format, etc.
:param data_configs: dictionary. Defines data related information.
Such as, location, the file for the true labels, the file for the benign samples,
the files for the adversarial examples, etc.
:param labels: the correct labels for each image
:param save: boolean. Save the transformed sample or not.
:param output_dir: path or str. The location to store the transformed samples.
It cannot be None when save is True.
:return:
"""
'''
# Load the baseline defense (PGD-ADT model)
pgd_adt = load_lenet(file=model_configs.get('pgd_trained'), trans_configs=None,
use_logits=False, wrap=False)
'''
# get the undefended model (UM)
file = os.path.join(model_configs.get('dir'), model_configs.get('um_file'))
undefended = load_lenet(file=file,
trans_configs=trans_configs.get('configs0'),
wrap=True)
print(">>> um:", type(undefended))
# load weak defenses into a pool
pool, _ = load_pool(trans_configs=trans_configs,
model_configs=model_configs,
active_list=True,
wrap=True)
# create AVEP and MV ensembles from the WD pool
wds = list(pool.values())
print(">>> wds:", type(wds), type(wds[0]))
#ensemble_AVEP = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)
ensemble_MV = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.MV.value)
# load the benign samples
bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
x_bs = np.load(bs_file)
img_rows, img_cols = x_bs.shape[1], x_bs.shape[2]
# load the corresponding true labels
label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))
labels = np.load(label_file)
if len(labels.shape) > 1:
labels = [np.argmax(p) for p in labels] #returns correct label
# cut dataset to specified number of images
x_bs = x_bs[:num_images]
labels = labels[:num_images]
# get indices of benign samples that are correctly classified by the targeted model
print(">>> Evaluating UM on [{}], it may take a while...".format(bs_file))
pred_bs = undefended.predict(x_bs)
corrections = get_corrections(y_pred=pred_bs, y_true=labels)
pred_bs = [np.argmax(p) for p in pred_bs]
# Evaluate AEs.
ae_list = data_configs.get('ae_files')
print(">>>>>>> AE list: ", ae_list)
predictionData = -np.ones((num_images, len(ae_list)), dtype = int)
for ae_count in range(len(ae_list)): # step through ae's one by one
ae_file = os.path.join(data_configs.get('dir'), ae_list[ae_count])
x_adv = | np.load(ae_file) | numpy.load |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import scipy
warnings.filterwarnings('ignore')
import matplotlib
import time#test time
import scipy.signal as signal#fftconvolve
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import caffe
import numpy as np
import os
from PIL import Image #for open image
from skimage.transform import resize
import math # for IPM
size = 1
ratio = 8
def Find_local_maximum(datase):
    """Find seed points (run midpoints) for IPM from a per-pixel class-score map.

    Scans each of the 60 rows for horizontal runs of consecutive columns whose
    argmax class is the same non-background class (> 0), and records the middle
    column of each run as a seed point.

    :param datase: score array; indexing assumes shape (60, 80, num_classes)
    :return: (seed_y, seed_x) numpy arrays - column and row indices of the seeds
    """
    seed_x = []  # seed points for IPM (row indices)
    seed_y = []  # seed points for IPM (column indices)
    # Removed dead code: an unused Gaussian window and two peak_p assignments
    # that were immediately overwritten.
    peak_p = np.max(datase, 2)
    print('shape:', np.shape(peak_p))
    t = time.time()
    for i in range(0, 60):
        peak_row = datase[i, :, :]
        # skip rows where every column's argmax is the background class (0)
        if sum(np.argmax(peak_row, 1)) == 0:
            continue
        j = 0
        while j < 79:
            run_values = np.array([])
            # Walk along a run of consecutive columns sharing the same
            # non-background argmax class. The bounds check comes first so
            # peak_row[j + 1] is never evaluated out of range (the original
            # checked `j < 79` last and could index peak_row[80]).
            while (j < 79 and np.argmax(peak_row[j]) > 0
                   and np.argmax(peak_row[j]) == np.argmax(peak_row[j + 1])):
                run_values = np.append(run_values, max(peak_row[j]))
                j += 1
            j += 1
            if len(run_values) > 0:
                # include the closing column of the run; clamp so a run ending
                # at the last column cannot index past the 80-column row
                run_values = np.append(run_values, max(peak_row[min(j, 79)]))
                # middle point of the run
                max_idx = j - len(run_values.tolist()) // 2 - 1
                seed_y.append(max_idx)
                seed_x.append(i)
    print("Time of fftconvolve 2: ", time.time() - t)
    return np.array(seed_y), np.array(seed_x)
"""
Solvers
-------
This part of the package provides wrappers around Assimulo solvers.
"""
from assimulo.problem import Explicit_Problem
import numpy as np
import sys
from means.simulation import SensitivityTerm
from means.simulation.trajectory import Trajectory, TrajectoryWithSensitivityData
import inspect
from means.util.memoisation import memoised_property, MemoisableObject
from means.util.sympyhelpers import to_one_dim_array
NP_FLOATING_POINT_PRECISION = np.double
#-- Easy initialisation utilities -------------------------------------------------------------
class UniqueNameInitialisationMixin(object):
@classmethod
def unique_name(self):
return NotImplemented
class SolverException(Exception):
__base_exception_class = None
__base_exception_kwargs = None
def __init__(self, message, base_exception=None):
if base_exception is not None:
if message is None:
message = ''
# We need to take message argument as otherwise SolverException is unpickleable
message += '{0.__class__.__name__}: {0!s}'.format(base_exception)
super(SolverException, self).__init__(message)
# CVodeError does not serialise well, so let's store it as a set of arguments and create the base exception
# on the fly, rather than storing the actual object
if base_exception is not None:
self.__base_exception_class = base_exception.__class__
self.__base_exception_kwargs = base_exception.__dict__.copy()
@property
def base_exception(self):
if self.__base_exception_class is not None:
return self.__base_exception_class(**self.__base_exception_kwargs)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self.message == other.message and self.__base_exception_class == other.__base_exception_class and \
self.__base_exception_kwargs == other.__base_exception_kwargs
def available_solvers(with_sensitivity_support=False):
    """
    Return a dictionary mapping lower-case unique solver names to solver classes.

    :param with_sensitivity_support: if True, return only solvers supporting
        parameter-sensitivity calculations; otherwise only those that do not.
    :return: ``{unique_name.lower(): solver_class}``
    :rtype: dict
    """
    members = inspect.getmembers(sys.modules[__name__])

    initialisable_solvers = {}
    # Some metaprogramming here: look for all classes in this module that are
    # subclasses of both `SolverBase` and `UniqueNameInitialisationMixin`.
    for name, candidate in members:  # renamed from `object`, which shadowed the builtin
        if inspect.isclass(candidate) and issubclass(candidate, SolverBase) \
                and issubclass(candidate, UniqueNameInitialisationMixin) \
                and candidate != UniqueNameInitialisationMixin:

            if with_sensitivity_support and not issubclass(candidate, SensitivitySolverBase):
                # If we need sensitivity support, skip all non-sensitivity solvers
                continue
            elif not with_sensitivity_support and issubclass(candidate, SensitivitySolverBase):
                # If we don't need sensitivity support, skip all solvers with sensitivity support
                continue

            unique_name = candidate.unique_name().lower()
            # Bug fix: the original assertion checked the *bound method*
            # (`candidate.unique_name`) against the dictionary keys, so a
            # duplicate name could never actually be detected.
            assert unique_name not in initialisable_solvers
            initialisable_solvers[unique_name] = candidate

    return initialisable_solvers
#-- Exception handling utilities -----------------------------------------------------------
def parse_flag(exception_message):
    """
    Parse the failure flag from a solver exception message.

    e.g.

    >>> parse_flag("Exception: Dopri5 failed with flag -3")
    -3

    :param exception_message: message from the exception
    :type exception_message: str
    :return: the (negative) flag id, or None if no flag could be parsed
    :rtype: int
    """
    import re
    # Raw string: the original '.* failed with flag (-\\d+)' relied on an
    # invalid '\d' escape sequence inside a plain string literal.
    match = re.match(r'.* failed with flag (-\d+)', exception_message)
    if match is None:
        # Explicit None for "no flag found" instead of swallowing an
        # AttributeError with a broad except clause.
        return None
    return int(match.group(1))
#-- Base solver functionality ---------------------------------------------------------------
def _set_kwargs_as_attributes(instance, **kwargs):
for attribute, value in kwargs.iteritems():
setattr(instance, attribute, value)
return instance
def _wrap_results_to_trajectories(simulated_timepoints, simulated_values, descriptions):
    """
    Package the solver output into :class:`Trajectory` objects, one per
    simulated quantity (i.e. one per column of *simulated_values*).

    :param simulated_timepoints: timepoints returned by the solver
    :param simulated_values: 2D array, timepoints x simulated quantities
    :param descriptions: one description per simulated quantity
    :return: list of :class:`Trajectory`
    """
    number_of_timepoints, number_of_simulated_values = simulated_values.shape

    # Sanity checks: one description per column, one timestamp per row
    assert len(descriptions) == number_of_simulated_values
    assert len(simulated_timepoints) == number_of_timepoints

    return [Trajectory(simulated_timepoints, column, description)
            for description, column in zip(descriptions, simulated_values.T)]
class SolverBase(MemoisableObject):
    """
    This acts as a base class for ODE solvers used in `means`.

    It wraps around the solvers available in :module:`assimulo` package, and provides some basic functionality
    that allows solvers be used with `means` objects.
    """

    # Constructor state, set once in __init__ and read-only afterwards:
    _parameters = None          # flat float array of model constants
    _initial_conditions = None  # flat float array, one entry per equation
    _problem = None             # the ODEProblem being simulated
    _starting_time = None       # simulation start time (float)
    _options = None             # extra keyword options forwarded to the solver

    def __init__(self, problem, parameters, initial_conditions, starting_time=0.0, **options):
        """
        :param problem: Problem to simulate
        :type problem: :class:`~means.approximation.ODEProblem`
        :param parameters: Parameters of the solver. One entry for each constant in `problem`
        :type parameters: :class:`iterable`
        :param initial_conditions: Initial conditions of the system. One for each of the equations.
                                   Assumed to be zero, if not specified
        :type initial_conditions: :class:`iterable`
        :param starting_time: Starting time for the solver, defaults to 0.0
        :type starting_time: float
        :param options: Options to be passed to the specific instance of the solver.
        """
        # Normalise the inputs to flat float arrays so the shape checks below
        # are meaningful regardless of what iterable the caller passed.
        parameters = to_one_dim_array(parameters, dtype=NP_FLOATING_POINT_PRECISION)
        initial_conditions = to_one_dim_array(initial_conditions, dtype=NP_FLOATING_POINT_PRECISION)

        # One parameter per problem constant, one initial condition per equation
        assert(parameters.shape == (len(problem.parameters),))
        assert(initial_conditions.shape[0] == problem.number_of_equations)

        self._parameters = parameters
        self._initial_conditions = initial_conditions
        self._starting_time = float(starting_time)
        self._problem = problem
        self._options = options

    def simulate(self, timepoints):
        """
        Simulate initialised solver for the specified timepoints

        :param timepoints: timepoints that will be returned from simulation
        :return: a list of trajectories for each of the equations in the problem.
        """
        solver = self._solver
        last_timepoint = timepoints[-1]

        try:
            # ncp_list asks assimulo to report values at exactly these timepoints
            simulated_timepoints, simulated_values = solver.simulate(last_timepoint, ncp_list=timepoints)
        except (Exception, self._solver_exception_class) as e:
            # The exceptions thrown by solvers are usually hiding the real cause, try to see if it is
            # our right_hand_side_as_function that is broken first
            try:
                self._problem.right_hand_side_as_function(self._initial_conditions, self._parameters)
            except:
                # If it is broken, throw that exception instead
                raise
            else:
                # If it is not, handle the original exception
                self._handle_solver_exception(e)

        trajectories = self._results_to_trajectories(simulated_timepoints, simulated_values)
        return trajectories

    def _handle_solver_exception(self, solver_exception):
        """
        This function handles any exceptions that occurred in the solver and have been proven not to be
        related to our right_hand_side function.
        Subclasses can override it.

        :param solver_exception: the exception raised by the solver
        :type solver_exception: Exception
        """
        # By default just re-raise it with our wrapper
        raise SolverException(None, solver_exception)

    def _default_solver_instance(self):
        # Subclasses must return a configured assimulo solver instance here.
        raise NotImplementedError

    @property
    def _solver_exception_class(self):
        """
        Property that would return the exception class thrown by a specific solver; the subclasses can override.
        """
        return None

    @memoised_property
    def _solver(self):
        # Lazily build the solver and apply all remaining user options to it.
        # Note: pops 'verbosity' from self._options; safe because this
        # property is memoised and therefore runs at most once.
        solver = self._default_solver_instance()
        verbosity = self._options.pop('verbosity', 50)
        return _set_kwargs_as_attributes(solver, verbosity=verbosity, **self._options)

    @memoised_property
    def _assimulo_problem(self):
        # Wrap the problem's right-hand side into an assimulo Explicit_Problem.
        # Parameters are bound via the closure; sensitivity solvers override
        # this to pass parameters explicitly instead.
        rhs = self._problem.right_hand_side_as_function
        parameters = self._parameters
        initial_conditions = self._initial_conditions
        initial_timepoint = self._starting_time

        model = Explicit_Problem(lambda t, x: rhs(x, parameters),
                                 initial_conditions, initial_timepoint)

        return model

    def _results_to_trajectories(self, simulated_timepoints, simulated_values):
        """
        Convert the resulting results into a list of trajectories

        :param simulated_timepoints: timepoints output from a solver
        :param simulated_values: values returned by the solver
        :return: a list of :class:`Trajectory`, one per left-hand-side term
        """
        descriptions = self._problem.left_hand_side_descriptors

        return _wrap_results_to_trajectories(simulated_timepoints, simulated_values, descriptions)
class CVodeMixin(UniqueNameInitialisationMixin, object):
    """
    Mixin providing CVODE-specific solver construction and the matching
    exception class.
    """

    @classmethod
    def unique_name(cls):
        return 'cvode'

    @property
    def _solver_exception_class(self):
        # Imported lazily so assimulo is only required when CVODE is actually used
        from assimulo.solvers.sundials import CVodeError
        return CVodeError

    def _cvode_instance(self, model, options):
        """
        Create a CVode solver for *model*.

        Rejects the ``usesens`` option: sensitivity usage is decided by the
        Simulation classes, not by the caller.
        """
        from assimulo.solvers.sundials import CVode
        solver = CVode(model)

        if 'usesens' in options:
            raise AttributeError('Cannot set \'usesens\' parameter. Use Simulation or SimulationWithSensitivities for '
                                 'sensitivity calculations')

        return solver
class CVodeSolver(SolverBase, CVodeMixin):
    """Plain (non-sensitivity) CVODE solver."""

    def _default_solver_instance(self):
        # Build the CVode instance and explicitly switch sensitivity
        # computation off -- this solver variant is parameter-insensitive.
        instance = self._cvode_instance(self._assimulo_problem, self._options)
        instance.usesens = False
        return instance
class ODE15sMixin(CVodeMixin):
    """
    A CVODE solver that mimics the parameters used in `ode15s`_ solver in MATLAB.

    The different parameters that are set differently by default are:

    ``discr``
        Set to ``'BDF'`` by default
    ``atol``
        Set to ``1e-6``
    ``rtol``
        Set to ``1e-3``

    .. _`ode15s`: http://www.mathworks.ch/ch/help/matlab/ref/ode15s.html
    """
    # Defaults chosen to resemble MATLAB's ode15s behaviour
    ATOL = 1e-6           # absolute tolerance
    RTOL = 1e-3           # relative tolerance
    MINH = 5.684342e-14   # minimal step size before the solver gives up

    @classmethod
    def unique_name(cls):
        return 'ode15s'

    def _cvode_instance(self, model, options):
        """
        Create a CVode solver configured with ode15s-like defaults.
        Every default can still be overridden through *options*.
        """
        solver = super(ODE15sMixin, self)._cvode_instance(model, options)

        # BDF method below makes it a key similarity to the ode15s
        solver.discr = options.pop('discr', 'BDF')
        solver.atol = options.pop('atol', self.ATOL)
        solver.rtol = options.pop('rtol', self.RTOL)
        solver.maxord = options.pop('maxord', 5)

        # If minh is not set, CVODE would try to continue the simulation, issuing a warning
        # We set it here so this simulation fails.
        solver.minh = options.pop('minh', self.MINH)

        return solver
class ODE15sLikeSolver(SolverBase, ODE15sMixin):
    """CVODE solver preconfigured with MATLAB ode15s-like defaults."""

    def _default_solver_instance(self):
        # Build the configured CVode instance; this variant never uses
        # sensitivities, so they are switched off explicitly.
        instance = self._cvode_instance(self._assimulo_problem, self._options)
        instance.usesens = False
        return instance
class Dopri5Solver(SolverBase, UniqueNameInitialisationMixin):
    """Dormand & Prince explicit Runge-Kutta solver of order (4)5."""

    def _default_solver_instance(self):
        from assimulo.solvers.runge_kutta import Dopri5
        return Dopri5(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        # Fixed: first parameter of a classmethod renamed from `self` to `cls`
        return 'dopri5'

    def _handle_solver_exception(self, solver_exception):
        """
        Attach human-readable documentation for known Dopri5 failure flags
        before rethrowing via the superclass wrapper.
        """
        # `str(exc)` is portable; the Python-2-only `exc.message` attribute
        # does not exist on Python 3.
        flag = parse_flag(str(solver_exception))
        FLAG_DOCUMENTATION = {-1: 'Input is not consistent',
                              -2: 'Larger NMAX is needed',
                              -3: 'Step size becomes too small',
                              -4: 'Problem is probably stiff'}
        try:
            exception = Exception(
                'Dopri5 failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag]))
        except KeyError:
            # We have no documentation for this flag, let's just reraise the original
            exception = solver_exception

        # Use the superclass method to rethrow the exception with our wrapper
        super(Dopri5Solver, self)._handle_solver_exception(exception)
class LSODARSolver(SolverBase, UniqueNameInitialisationMixin):
    """ODEPACK LSODAR solver."""

    @property
    def _solver_exception_class(self):
        from assimulo.exception import ODEPACK_Exception
        return ODEPACK_Exception

    def _default_solver_instance(self):
        from assimulo.solvers import LSODAR
        return LSODAR(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        # Fixed: first parameter of a classmethod renamed from `self` to `cls`
        return 'lsodar'

    def _handle_solver_exception(self, solver_exception):
        """
        Attach the ODEPACK documentation for known failure flags before
        rethrowing via the superclass wrapper.
        """
        from assimulo.exception import ODEPACK_Exception

        # `str(exc)` is portable; the Python-2-only `exc.message` attribute
        # does not exist on Python 3.
        flag = parse_flag(str(solver_exception))
        FLAG_DOCUMENTATION = {-1: 'Excess work done on this call (perhaps wrong jt)',
                              -2: 'Excess accuracy requested (tolerances too small)',
                              -3: 'Illegal input detected (see printed message)',
                              -4: 'Repeated error test failures (check all inputs)',
                              -5: 'Repeated convergence failures (perhaps bad jacobian supplied or wrong choice of '
                                  'jt or tolerances)',
                              -6: 'Error weight became zero during problem.',
                              -7: 'Work space insufficient to finish (see messages)'}
        try:
            exception = ODEPACK_Exception(
                'LSODAR failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag]))
        except KeyError:
            # We have no documentation for this flag, let's just reraise the original
            exception = solver_exception

        # Use the superclass method to rethrow the exception with our wrapper
        super(LSODARSolver, self)._handle_solver_exception(exception)
class ExplicitEulerSolver(SolverBase, UniqueNameInitialisationMixin):
    """Fixed-step explicit Euler solver."""

    @classmethod
    def unique_name(cls):
        return 'euler'

    def _default_solver_instance(self):
        from assimulo.solvers import ExplicitEuler
        return ExplicitEuler(self._assimulo_problem)

    def simulate(self, timepoints):
        """
        Simulate for the given timepoints.

        The Euler solver does not return values at exactly the requested
        timepoints, so each trajectory is resampled onto them afterwards.
        """
        raw_trajectories = super(ExplicitEulerSolver, self).simulate(timepoints)
        return [trajectory.resample(timepoints) for trajectory in raw_trajectories]
class RungeKutta4Solver(SolverBase, UniqueNameInitialisationMixin):
    """Classic fixed-step Runge-Kutta solver of order 4."""

    @classmethod
    def unique_name(cls):
        return 'rungekutta4'

    def _default_solver_instance(self):
        from assimulo.solvers import RungeKutta4
        return RungeKutta4(self._assimulo_problem)

    def simulate(self, timepoints):
        """
        Simulate for the given timepoints.

        The RungeKutta4 solver does not return values at exactly the requested
        timepoints, so each trajectory is resampled onto them afterwards.
        """
        raw_trajectories = super(RungeKutta4Solver, self).simulate(timepoints)
        return [trajectory.resample(timepoints) for trajectory in raw_trajectories]
class RungeKutta34Solver(SolverBase, UniqueNameInitialisationMixin):
    """Adaptive Runge-Kutta solver of order 3(4)."""

    @classmethod
    def unique_name(cls):
        return 'rungekutta34'

    def _default_solver_instance(self):
        from assimulo.solvers import RungeKutta34
        return RungeKutta34(self._assimulo_problem)
class Radau5Solver(SolverBase, UniqueNameInitialisationMixin):
    """Implicit Runge-Kutta (Radau IIA) solver of order 5."""

    def _default_solver_instance(self):
        from assimulo.solvers import Radau5ODE
        return Radau5ODE(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'radau5'

    def _handle_solver_exception(self, solver_exception):
        """
        Attach human-readable documentation for known Radau5 failure flags
        before rethrowing via the superclass wrapper.
        """
        # `str(exc)` is portable; the Python-2-only `exc.message` attribute
        # does not exist on Python 3.
        flag = parse_flag(str(solver_exception))
        FLAG_DOCUMENTATION = {-1: 'Input is not consistent',
                              -2: 'Larger NMAX is needed',
                              -3: 'Step size becomes too small',
                              -4: 'Matrix is repeatedly singular'}
        try:
            exception = Exception(
                'Radau5 failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag]))
        except KeyError:
            # We have no documentation for this flag, let's just reraise the original
            exception = solver_exception

        # Use the superclass method to rethrow the exception with our wrapper
        super(Radau5Solver, self)._handle_solver_exception(exception)
class RodasSolver(SolverBase, UniqueNameInitialisationMixin):
    """Rosenbrock (Rodas) solver."""

    def _default_solver_instance(self):
        from assimulo.solvers import RodasODE
        return RodasODE(self._assimulo_problem)

    @classmethod
    def unique_name(cls):
        return 'rodas'

    def _handle_solver_exception(self, solver_exception):
        """
        Attach human-readable documentation for known Rodas failure flags
        before rethrowing via the superclass wrapper.
        """
        # `str(exc)` is portable; the Python-2-only `exc.message` attribute
        # does not exist on Python 3.
        flag = parse_flag(str(solver_exception))
        FLAG_DOCUMENTATION = {-1: 'Input is not consistent',
                              -2: 'Larger NMAX is needed',
                              -3: 'Step size becomes too small',
                              -4: 'Matrix is repeatedly singular'}
        try:
            exception = Exception(
                'Rodas failed with flag {0}: {1}'.format(flag, FLAG_DOCUMENTATION[flag]))
        except KeyError:
            # We have no documentation for this flag, let's just reraise the original
            exception = solver_exception

        # Use the superclass method to rethrow the exception with our wrapper
        super(RodasSolver, self)._handle_solver_exception(exception)
#-- Solvers with sensitivity support -----------------------------------------------------------------------------------
def _add_sensitivity_data_to_trajectories(trajectories, raw_sensitivity_data, parameters):
    """
    Attach parameter-sensitivity information to each trajectory.

    :param trajectories: simulated trajectories, one per ODE term
    :param raw_sensitivity_data: raw solver sensitivity output, indexed as
        ``raw_sensitivity_data[parameter_index, timepoint_index, term_index]``
    :param parameters: the parameters the sensitivities correspond to
    :return: list of :class:`TrajectoryWithSensitivityData`, one per input trajectory
    """
    trajectories_with_sensitivity_data = []
    # Single pass over the trajectories (the original built an intermediate
    # per-trajectory list in one loop and consumed it in a second, and also
    # left an unused local `ode_term` behind).
    for term_index, trajectory in enumerate(trajectories):
        sensitivity_trajectories = []
        for parameter_index, parameter in enumerate(parameters):
            values = raw_sensitivity_data[parameter_index, :, term_index]
            sensitivity_trajectories.append(Trajectory(trajectory.timepoints, values,
                                                       SensitivityTerm(trajectory.description, parameter)))

        trajectories_with_sensitivity_data.append(
            TrajectoryWithSensitivityData.from_trajectory(trajectory, sensitivity_trajectories))

    return trajectories_with_sensitivity_data
class SensitivitySolverBase(SolverBase):
@property
def _assimulo_problem(self):
rhs = self._problem.right_hand_side_as_function
parameters = self._parameters
initial_conditions = self._initial_conditions
initial_timepoint = self._starting_time
# Solvers with sensitivity support should be able to accept parameters
# into rhs function directly
model = Explicit_Problem(lambda t, x, p: rhs(x, p),
initial_conditions, initial_timepoint)
model.p0 = | np.array(parameters) | numpy.array |
""" Sphere simulation and position tools
Geometry simulation:
* :py:func:`simulate_spheres_in_sphere`: Simulate a random sphere packing of hard
spheres of identical radius inside a larger sphere
Neighbor counting:
* :py:func:`nearest_neighbors`: Calculate the distance to the nth closest point to
a given set of points
* :py:func:`count_neighbors`: Calculate the number of points within a radial neighborhood
Point manipulation tools:
* :py:func:`split_red_green`: Split a point list into red/green with a given
probability distribution
* :py:func:`mask_points`: Subset point lists based on a mask
* :py:func:`concat_points`: Concatenate point lists
"""
# Imports
from typing import Tuple, List
# 3rd party
import numpy as np
from sklearn.neighbors import BallTree
# Our own imports
from . import _simulation
from .consts import (
NUM_RED, NUM_GREEN, AGGREGATE_RADIUS, SAME_CELL_RADIUS, NEIGHBOR_RADIUS,
)
# Neighbor counting
def nearest_neighbors(red_points: np.ndarray,
                      green_points: np.ndarray,
                      num_closest: int = 1) -> np.ndarray:
    """ Find the closest green point to a red point

    :param red_points:
        The red coordinates -- presumably a sequence of per-axis 1D arrays
        (e.g. the ``(x, y, z)`` tuple produced by :py:func:`mask_points`);
        ``np.stack(..., axis=1)`` below turns it into an (n, 3) matrix --
        TODO confirm against callers
    :param green_points:
        The green coordinates in the same layout (m points)
    :param num_closest:
        The nth closest point to return
    :returns:
        A length-n 1D array with, for each red point, the distance to its
        nth-closest green point (not an n x 3 array as previously documented)
    """
    red_points = np.stack(red_points, axis=1)
    green_points = np.stack(green_points, axis=1)

    tree = BallTree(green_points)
    # query returns (distances, indices); keep only the nth-closest distance column
    return tree.query(red_points, k=num_closest, return_distance=True)[0][:, num_closest-1]
def count_neighbors(red_points: np.ndarray,
                    green_points: np.ndarray,
                    radius: float = NEIGHBOR_RADIUS) -> np.ndarray:
    """ Count the number of neighbors within a radius

    :param ndarray red_points:
        The red coordinates -- presumably a sequence of per-axis 1D arrays
        stacked below into an (n, 3) matrix -- TODO confirm against callers
    :param ndarray green_points:
        The green coordinates in the same layout (m points)
    :param float radius:
        The radius within which a point is a neighbor
    :returns:
        A length-n 1D array with, for each red point, the count of green
        points within ``radius`` (not an n x 3 array as previously documented)
    """
    red_points = np.stack(red_points, axis=1)
    green_points = np.stack(green_points, axis=1)

    tree = BallTree(green_points)
    return tree.query_radius(red_points, r=radius, count_only=True)
# Point manipulation tools
def mask_points(points: List[np.ndarray],
                mask: np.ndarray) -> Tuple[np.ndarray]:
    """ Mask off the points

    :param List[ndarray] points:
        List of same-length 1D coordinate arrays to mask (one per axis)
    :param ndarray mask:
        Boolean (or index) mask selecting which points to keep
    :returns:
        A tuple with the same coordinate arrays, in order, restricted to
        the masked entries
    """
    stacked = np.stack(points, axis=1)
    stacked = stacked[mask, :]
    # Generalized: return one column per input axis instead of assuming
    # exactly three coordinate arrays (backwards compatible for x/y/z input,
    # which still yields a 3-tuple).
    return tuple(stacked[:, i] for i in range(stacked.shape[1]))
def concat_points(*args) -> Tuple[np.ndarray]:
    """ Concatenate all the points

    :param \\*args:
        List of (x, y, z) ndarray tuples to concatenate
    :returns:
        An x, y, z tuple of all the points
    """
    xs: List[np.ndarray] = []
    ys: List[np.ndarray] = []
    zs: List[np.ndarray] = []

    for x, y, z in args:
        # 0-dimensional entries represent empty point sets: all three
        # coordinates must be scalar together, and the entry is skipped.
        if x.ndim == 0:
            assert y.ndim == 0
            assert z.ndim == 0
            continue
        # All three coordinate arrays must describe the same number of points
        assert x.shape[0] == y.shape[0] == z.shape[0]
        xs.append(x)
        ys.append(y)
        zs.append(z)

    return np.concatenate(xs), np.concatenate(ys), np.concatenate(zs)
def split_red_green(all_points: Tuple[np.ndarray],
num_red: int = NUM_RED,
num_green: int = NUM_GREEN,
udist: str = 'uniform') -> Tuple[Tuple[np.ndarray]]:
""" Split into red and green cells
:param Tuple[ndarray] all_points:
The list of coordinates to split into red and green
:param int num_red:
The number of points to assign to red
:param int num_green:
The number of points to assign to green
:param str udist:
Distribution for the red points
:returns:
A tuple of (red, green) points
"""
x, y, z = all_points
all_radii = np.sqrt(x**2 + y**2 + z**2)
all_indices = np.arange(all_radii.shape[0])
# Various distributions
if udist == 'uniform':
all_prob = | np.ones_like(all_radii) | numpy.ones_like |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each consisting of a tuple of three
                                integer arrays (rot, tn, td), where
                                rot is the rotation matrix and tn/td
                                are the numerator and denominator of the
                                translation vector. The transformations
                                are defined in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Precompute the transposed rotations and the per-operator complex
        # phase factors exp(-2*pi*i*tn/td) used by
        # symmetryEquivalentMillerIndices.
        self.transposed_rotations = N.array(
            [N.transpose(rotation) for rotation, _tn, _td in transformations])
        self.phase_factors = N.exp(
            N.array([(-2j * N.pi * tn) / td for _rotation, tn, td in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor) of two arrays
                 of length equal to the number of space group
                 transformations. miller_indices contains the Miller
                 indices of each reflection equivalent by symmetry to the
                 reflection hkl (including hkl itself as the first element).
                 phase_factor contains the phase factors that must be applied
                 to the structure factor of reflection hkl to obtain the
                 structure factor of the symmetry equivalent reflection.
        :rtype: tuple
        """
        equivalents = N.dot(self.transposed_rotations, hkl)
        phases = N.multiply.reduce(self.phase_factors ** hkl, -1)
        return equivalents, phases
# Space-group operator tables (generated from cctbx data).  Instead of
# spelling every operator out as a separate run of statements, each group is
# described as (number, symbol, operators), where an operator is a tuple of
# (rotation as a flat 9-tuple, translation numerator, translation denominator).
# The loop at the bottom expands the table into SpaceGroup objects and
# registers each one under both its number and its symbol.
space_groups = {}

_IDENTITY = (1, 0, 0, 0, 1, 0, 0, 0, 1)

_SPACE_GROUP_TABLE = [
    (1, 'P 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
    ]),
    (2, 'P -1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ]),
    (3, 'P 1 2 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ]),
    (4, 'P 1 21 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 1, 0), (1, 2, 1)),
    ]),
    (5, 'C 1 2 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        (_IDENTITY, (1, 1, 0), (2, 2, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
    ]),
    (6, 'P 1 m 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ]),
    (7, 'P 1 c 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ]),
    (8, 'C 1 m 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
        (_IDENTITY, (1, 1, 0), (2, 2, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ]),
    (9, 'C 1 c 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
        (_IDENTITY, (1, 1, 0), (2, 2, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ]),
    (10, 'P 1 2/m 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ]),
    (11, 'P 1 21/m 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 1, 0), (1, 2, 1)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, -1, 0), (1, 2, 1)),
    ]),
    (12, 'C 1 2/m 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
        (_IDENTITY, (1, 1, 0), (2, 2, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ]),
    (13, 'P 1 2/c 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, -1), (1, 1, 2)),
    ]),
    (14, 'P 1 21/c 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 1, 1), (1, 2, 2)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, -1, -1), (1, 2, 2)),
    ]),
    (15, 'C 1 2/c 1', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, -1), (1, 1, 2)),
        (_IDENTITY, (1, 1, 0), (2, 2, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, -1), (2, 2, 2)),
    ]),
    (16, 'P 2 2 2', [
        (_IDENTITY, (0, 0, 0), (1, 1, 1)),
        ((1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
        ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ]),
]

for _number, _symbol, _operators in _SPACE_GROUP_TABLE:
    transformations = []
    for _rot, _num, _den in _operators:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register the group under both its number and its Hermann-Mauguin symbol
    space_groups[_number] = sg
    space_groups[_symbol] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
# Space group 42, 'F m m 2'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den, kept unreduced as elsewhere in this table.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,1],   [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1],  [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
# Space group 43, 'F d d 2'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den, kept unreduced as elsewhere in this table.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],  [1,3,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1],  [1,3,3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],   [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1],  [3,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1],  [3,1,3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],  [3,3,1], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,1],  [3,3,1], [4,4,4]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
# Space group 44, 'I m m 2'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
# Space group 45, 'I b a 2'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den, kept unreduced (e.g. (1/2,1/2,1)) as elsewhere
# in this generated table.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
# Space group 46, 'I m a 2'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den, kept unreduced (e.g. (1,1/2,1/2)) as elsewhere
# in this generated table.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [1,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
# Space group 47, 'P m m m'.
# All eight operations are pure (diagonal) rotations/reflections with
# zero translation; stored as (rotation matrix, translation numerators,
# translation denominators).
transformations = []
for _rot in [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
# Space group 48, 'P n n n :2' (origin choice 2).
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); negative numerators are kept
# as generated rather than normalized into [0,1).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
# Space group 49, 'P c c m'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
# Space group 50, 'P b a n :2' (origin choice 2).
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); negative numerators are kept
# as generated rather than normalized into [0,1).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],   [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,0],  [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0],  [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
# Space group 51, 'P m m a'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,0],  [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,0], [2,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
# Space group 52, 'P n n a'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],     [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],     [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],     [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,0],     [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],     [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1],   [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,-1],  [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,0],    [2,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
# Space group 53, 'P m n a'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,-1], [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
# Space group 54, 'P c c a'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,0],   [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,0],  [2,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
# Space group 55, 'P b a m'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],   [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],   [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
# Space group 56, 'P c c n'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
# Space group 57, 'P b c m'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1],   [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,0],  [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,-1],  [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
# Space group 58, 'P n n m'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],    [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
# Space group 59, 'P m m n :2' (origin choice 2).
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); negative numerators are kept
# as generated rather than normalized into [0,1).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,0],   [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],   [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,0],  [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0],  [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
# Space group 60, 'P b c n'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],    [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1],   [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
# Space group 61, 'P b c a'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,-1], [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
# Space group 62, 'P n m a'.
# Each symmetry operation is stored as (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num / trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,-1],  [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
# Space group No. 63 ('C m c m'): the eight point-group operations plus
# their C-centring copies.  Each triple below is (flattened 3x3 rotation,
# translation numerators, translation denominators); the translation is
# num/den in fractional coordinates.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,-1], [2,2,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
# Space group No. 64 ('C m c a'): eight point-group operations and their
# C-centring copies, each given as (flattened rotation, translation
# numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],   [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,1,-1],  [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,1,-1],  [1,2,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
# Space group No. 65 ('C m m m'): all eight mmm point-group rotations
# appear once with zero translation and once shifted by the C-centring
# vector (1/2, 1/2, 0), so build them as translation x rotation pairs.
transformations = []
for num, den in [([0,0,0], [1,1,1]), ([1,1,0], [2,2,1])]:
    for rot_elements in [
        [1,0,0,0,1,0,0,0,1],
        [1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,-1],
        [-1,0,0,0,-1,0,0,0,1],
        [-1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,1],
        [1,0,0,0,-1,0,0,0,1],
        [1,0,0,0,1,0,0,0,-1],
    ]:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
# Space group No. 66 ('C c c m'): eight point-group operations and their
# C-centring copies, each given as (flattened rotation, translation
# numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],  [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,0],  [2,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
# Space group No. 67 ('C m m a'): eight point-group operations and their
# C-centring copies, each given as (flattened rotation, translation
# numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,0],  [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,0],  [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],  [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,1,0],  [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,1,0],  [1,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
# Space group No. 68 ('C c c a :2', origin choice 2): eight point-group
# operations and their C-centring copies, each given as (flattened
# rotation, translation numerators, translation denominators).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,0],   [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,0,0],  [2,1,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],   [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],   [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,1,-1],  [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,-1],  [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,1,0],   [1,2,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
# Space group No. 69 ('F m m m'): the eight mmm point-group rotations
# repeated for each of the four F-centring translations (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), so build them as
# translation x rotation pairs.
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for rot_elements in [
        [1,0,0,0,1,0,0,0,1],
        [1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,-1],
        [-1,0,0,0,-1,0,0,0,1],
        [-1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,1],
        [1,0,0,0,-1,0,0,0,1],
        [1,0,0,0,1,0,0,0,-1],
    ]:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
# Space group No. 70 ('F d d d :2', origin choice 2): 32 operations —
# eight point-group operations with d-glide translations, repeated for
# the four F-centring vectors.  The per-operation translations are not
# uniform, so all 32 triples (flattened rotation, translation
# numerators, translation denominators) are listed explicitly.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],    [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],    [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1],  [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1],  [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [4,4,1]),
    ([1,0,0,0,1,0,0,0,1],    [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,3,3],    [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,3],    [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,3,1],    [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1],    [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,1,1],    [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,1,1],   [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,1,1],   [4,4,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,0,1],    [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,3],    [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1],  [3,0,3],    [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [3,1,1],    [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1],    [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,-1,1],   [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1],   [1,0,1],    [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1],   [1,-1,1],   [4,4,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,0],    [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,3,1],    [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1],  [3,1,1],    [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [3,3,0],    [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0],    [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,-1],   [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,-1],   [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,0],    [4,4,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
# Space group No. 71 ('I m m m'): the eight mmm point-group rotations,
# once with zero translation and once shifted by the I-centring vector
# (1/2, 1/2, 1/2), so build them as translation x rotation pairs.
transformations = []
for num, den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for rot_elements in [
        [1,0,0,0,1,0,0,0,1],
        [1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,-1],
        [-1,0,0,0,-1,0,0,0,1],
        [-1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,1],
        [1,0,0,0,-1,0,0,0,1],
        [1,0,0,0,1,0,0,0,-1],
    ]:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
# Space group No. 72 ('I b a m'): eight point-group operations and their
# I-centring copies, each given as (flattened rotation, translation
# numerators, translation denominators).  The translation data is taken
# verbatim from the generated table (including the (1,1,1)/(2,2,1)
# entries, which equal (1/2,1/2,1)).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],  [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,1],  [2,2,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
# Space group 73 (I b c a): symmetry operations as
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
# Space group 74 (I m m a): symmetry operations as
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
# Space group 75 (P 4): the four four-fold rotations about c, all with
# zero translation.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
# Space group 76 (P 41): four-fold screw axis; translations are fractions
# of the c axis (numerator over denominator).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
# Space group 77 (P 42): four-fold screw axis with half-cell translation
# along c on the quarter turns.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
# Space group 78 (P 43): the enantiomorphic partner of P 41 (screw
# translations 3/4 and 1/4 swapped on the quarter turns).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
# Space group 79 (I 4): the four P 4 rotations, each combined with the
# zero translation and then with the (1/2,1/2,1/2) body-centring shift.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]
for _n, _d in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
# Space group 80 (I 41): four-fold screw operations and their
# body-centred copies.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
# Space group 81 (P -4): the four roto-inversion operations, all with
# zero translation.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
# Space group 82 (I -4): the P -4 operations, each combined with the
# zero translation and then with the (1/2,1/2,1/2) body-centring shift.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
for _n, _d in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
# Space group 83 (P 4/m): eight point-group operations, all with zero
# translation.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
# Space group 84 (P 42/m): screw rotations carry a half-cell translation
# along c; inverse quarter turns carry the negated translation.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
# Space group 85 (P 4/n, origin choice 2): quarter turns are shifted by
# half a cell in a or b; the inverse operations carry negated shifts.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
# Space group 86 (P 42/n, origin choice 2).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
# Space group 87 (I 4/m): the eight P 4/m rotations, each combined with
# the zero translation and then with the (1/2,1/2,1/2) body-centring
# shift.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
]
for _n, _d in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
# Space group 88 (I 41/a, origin choice 2): screw/glide operations and
# their body-centred copies (translation fractions differ per operation,
# so the full list is written out).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,5], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
# Space group 89 (P 4 2 2): eight proper rotations, all with zero
# translation.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
# Space group 90 (P 4 21 2): quarter turns and two-fold axes along a/b
# are shifted by (1/2,1/2,0).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
# Space group 91 (P 41 2 2): 41 screw axis combined with perpendicular
# two-fold axes; translations are fractions of the c axis.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,3], [1,1,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,4]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
# Space group 92 (P 41 21 2).
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [2,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
# Space group 93 (P 42 2 2): 42 screw axis combined with perpendicular
# two-fold axes.
transformations = []
for _r, _n, _d in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
# Space group 94 (P 42 21 2): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
# Space group 95 (P 43 2 2): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,3), (1,1,4)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,3), (1,1,4)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
# Space group 96 (P 43 21 2): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,3), (2,2,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,3), (2,2,4)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
# Space group 97 (I 4 2 2): the 8 point operations appear twice, once with a
# zero translation and once shifted by the (1/2,1/2,1/2) body-centering vector.
transformations = []
_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
]
for num, den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for rot_elems in _rotations:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
# Space group 98 (I 41 2 2): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,3), (2,1,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,3), (2,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    # Body-centered copies, shifted by (1/2,1/2,1/2):
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,5), (1,2,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,5), (1,2,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
# Space group 99 (P 4 m m): all 8 operations are pure point operations
# (zero translation part).
transformations = []
for rot_elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[99] = sg
space_groups['P 4 m m'] = sg
# Space group 100 (P 4 b m): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1), (1,1,0), (2,2,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[100] = sg
space_groups['P 4 b m'] = sg
# Space group 101 (P 42 c m): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[101] = sg
space_groups['P 42 c m'] = sg
# Space group 102 (P 42 n m): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[102] = sg
space_groups['P 42 n m'] = sg
# Space group 103 (P 4 c c): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[103] = sg
space_groups['P 4 c c'] = sg
# Space group 104 (P 4 n c): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[104] = sg
space_groups['P 4 n c'] = sg
# Space group 105 (P 42 m c): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[105] = sg
space_groups['P 42 m c'] = sg
# Space group 106 (P 42 b c): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[106] = sg
space_groups['P 42 b c'] = sg
# Space group 107 (I 4 m m): the 8 point operations appear twice, once with a
# zero translation and once shifted by the (1/2,1/2,1/2) body-centering vector.
transformations = []
_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]
for num, den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for rot_elems in _rotations:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[107] = sg
space_groups['I 4 m m'] = sg
# Space group 108 (I 4 c m): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    # Body-centered copies, shifted by (1/2,1/2,1/2):
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,1), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1), (1,1,1), (2,2,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[108] = sg
space_groups['I 4 c m'] = sg
# Space group 109 (I 41 m d): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((0,1,0,1,0,0,0,0,1), (1,0,3), (2,1,4)),
    # Body-centered copies, shifted by (1/2,1/2,1/2):
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((0,1,0,1,0,0,0,0,1), (1,1,5), (1,2,4)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[109] = sg
space_groups['I 41 m d'] = sg
# Space group 110 (I 41 c d): each symmetry operation is stored as a
# (3x3 rotation matrix, translation numerator, translation denominator) triple.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,0,1), (2,1,4)),
    ((0,1,0,1,0,0,0,0,1), (1,0,1), (2,1,4)),
    # Body-centered copies, shifted by (1/2,1/2,1/2):
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,3), (1,2,4)),
    ((0,1,0,1,0,0,0,0,1), (1,1,3), (1,2,4)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)  # reshape flat 9-vector into a 3x3 matrix in place
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[110] = sg
space_groups['I 41 c d'] = sg
# Space group 111 (P -4 2 m).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# All eight operations of this group are translation-free.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
# Space group 112 (P -4 2 c).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
# Space group 113 (P -4 21 m).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# Space group 114 (P -4 21 c).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# Space group 115 (P -4 m 2).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# All eight operations of this group are translation-free.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# Space group 116 (P -4 c 2).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# Space group 117 (P -4 b 2).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# Space group 118 (P -4 n 2).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# Space group 119 (I -4 m 2).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# The second half repeats the first eight rotations shifted by the
# body-centering translation (1/2, 1/2, 1/2).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# Space group 120 (I -4 c 2).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1],    [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [1,1,1], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# Space group 121 (I -4 2 m).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# The second half repeats the first eight rotations shifted by the
# body-centering translation (1/2, 1/2, 1/2).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# Space group 122 (I -4 2 d).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1],     [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],   [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1],     [1,1,5], [1,2,4]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
# Space group 123 (P 4/m m m).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# All sixteen operations of this group are translation-free.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],      [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],     [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],     [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],     [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],      [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
# Space group 124 (P 4/m c c).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# Note: some numerators are negative (e.g. translation -1/2 along z),
# exactly as emitted by the table generator.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],      [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],    [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],    [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],     [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],    [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],     [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],     [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],     [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],      [0,0,-1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
# Space group 125 (P 4/n b m, origin choice 2 -- hence the ':2' suffix).
# Symmetry operations as (rot, trans_num, trans_den): flattened 3x3 integer
# rotation plus a fractional translation (numerator/denominator triples).
# Negative numerators appear for the inverted operations, exactly as
# emitted by the table generator.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],      [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],     [1,0,0],    [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],     [0,1,0],    [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],    [0,1,0],    [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],    [1,0,0],    [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],    [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],     [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],   [1,1,0],    [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],    [-1,0,0],   [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],    [0,-1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],     [0,-1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],     [-1,0,0],   [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],     [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],      [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
# Space group 126 ('P 4/n n c :2'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 0, 0], [2, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, 0, 0], [2, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, -1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
# Space group 127 ('P 4/m b m'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
# Space group 128 ('P 4/m n c'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
# Space group 129 ('P 4/n m m :2'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 0, 0], [2, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, 0, 0], [2, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, -1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
# Space group 130 ('P 4/n c c :2'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 0, 0], [2, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, 0, 0], [2, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, -1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
# Space group 131 ('P 42/m m c'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
# Space group 132 ('P 42/m c m'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
# Space group 133 ('P 42/n b c :2'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 1, 1], [1, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, -1, -1], [1, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
# Space group 134 ('P 42/n n m :2'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 1, 1], [1, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 1, 1], [1, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 0, 1], [2, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, -1, -1], [1, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [0, -1, -1], [1, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, 0, -1], [2, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
# Space group 135 ('P 42/m b c'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 0], [2, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, 0], [2, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
# Space group 136 ('P 42/m n m'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, -1, -1], [2, 2, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [-1, -1, -1], [2, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
# Space group 137 ('P 42/n m c :2'): 16 symmetry operations, each given as
# (flattened 3x3 rotation matrix, translation numerator, translation denominator).
for flat_rot, num, den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, 0, 0, 0, 0, 1], [1, 0, 1], [2, 1, 2]),
    ([0, 1, 0, -1, 0, 0, 0, 0, 1], [0, 1, 1], [1, 2, 2]),
    ([1, 0, 0, 0, -1, 0, 0, 0, -1], [1, 0, 0], [2, 1, 1]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 1, 0], [1, 2, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [1, 1, 0], [2, 2, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 0, 0, 0, 0, -1], [-1, 0, -1], [2, 1, 2]),
    ([0, -1, 0, 1, 0, 0, 0, 0, -1], [0, -1, -1], [1, 2, 2]),
    ([-1, 0, 0, 0, 1, 0, 0, 0, 1], [-1, 0, 0], [2, 1, 1]),
    ([1, 0, 0, 0, -1, 0, 0, 0, 1], [0, -1, 0], [1, 2, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [-1, -1, 0], [2, 2, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [-1, -1, -1], [2, 2, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
]:
    rot = N.array(flat_rot)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Space group 138 ('P 42/n c m :2').  One table row per symmetry operation:
# rotation-matrix elements (row-major), translation numerators, translation
# denominators (fractional translation = num/den componentwise).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],   [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],   [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],   [1,1,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Space group 139 ('I 4/m m m').  The 16 point-group rotations are listed
# once and combined with the two lattice translations (0,0,0) and
# (1/2,1/2,1/2) — the (1/2,1/2,1/2) shift is the I-centring visible in the
# original generated stanzas — giving 32 operations in the same order as
# the generator emitted them.
transformations = []
for num, den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for elems in ([1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,0,0,0,0,1],
                  [0,1,0,-1,0,0,0,0,1],
                  [1,0,0,0,-1,0,0,0,-1],
                  [-1,0,0,0,1,0,0,0,-1],
                  [-1,0,0,0,-1,0,0,0,1],
                  [0,1,0,1,0,0,0,0,-1],
                  [0,-1,0,-1,0,0,0,0,-1],
                  [-1,0,0,0,-1,0,0,0,-1],
                  [0,1,0,-1,0,0,0,0,-1],
                  [0,-1,0,1,0,0,0,0,-1],
                  [-1,0,0,0,1,0,0,0,1],
                  [1,0,0,0,-1,0,0,0,1],
                  [1,0,0,0,1,0,0,0,-1],
                  [0,-1,0,-1,0,0,0,0,1],
                  [0,1,0,1,0,0,0,0,1]):
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
# Space group 140 ('I 4/m c m').  One table row per symmetry operation:
# rotation-matrix elements (row-major), translation numerators, translation
# denominators (fractional translation = num/den componentwise).  The second
# half of the table repeats the point operations shifted by the apparent
# I-centring translation.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],  [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1],  [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1],  [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],  [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],  [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,1],  [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,0],  [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,0],  [2,2,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
# Space group 141 ('I 41/a m d :2', origin choice 2).  One table row per
# symmetry operation: rotation-matrix elements (row-major), translation
# numerators, translation denominators (fractional translation = num/den
# componentwise).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,3,1],    [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,3],    [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0],    [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,3,1],    [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,3],    [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1],  [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,-1,0],   [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-3,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-3], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],    [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [3,5,3],    [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [3,3,5],    [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],    [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [3,5,3],    [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,5],    [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,-1,1],   [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,-1],   [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,1],    [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [1,0,1],    [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,0,1],    [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,-1,1],   [4,4,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,-1],   [4,4,4]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
# Space group 142 ('I 41/a c d :2', origin choice 2).  One table row per
# symmetry operation: rotation-matrix elements (row-major), translation
# numerators, translation denominators (fractional translation = num/den
# componentwise).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,3,1],    [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,3],    [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],    [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0],    [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,3,3],    [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],    [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1],  [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1],   [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,-1,0],   [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-3,-3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],    [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [3,5,3],    [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [3,3,5],    [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],    [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],    [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [3,5,5],    [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3],    [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,-1,1],   [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,-1],   [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],    [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [1,0,1],    [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,-1,-1],  [4,4,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,1],    [4,4,4]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
# Space group 143 ('P 3'): three rotations, all with zero translation.
transformations = []
for elems in ([1,0,0,0,1,0,0,0,1],
              [0,-1,0,1,-1,0,0,0,1],
              [-1,1,0,-1,0,0,0,0,1]):
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
# Space group 144 ('P 31'): three rotations combined with fractional
# c-axis translations of 0, 1/3 and 2/3 (num/den componentwise).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
# Space group 145 ('P 32'): same rotations as 'P 31' but with the 1/3 and
# 2/3 c-axis translations swapped (num/den componentwise).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
# Space group 146 ('R 3 :H'): the three rotations are repeated for each of
# the translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3) — the extra
# translations are the centring shifts visible in the original generated
# stanzas.  Append order matches the generator: all rotations per
# translation.
transformations = []
for num, den in (([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])):
    for elems in ([1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1]):
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
# Space group 147 ('P -3'): six rotations, all with zero translation.
transformations = []
for elems in ([1,0,0,0,1,0,0,0,1],
              [0,-1,0,1,-1,0,0,0,1],
              [-1,1,0,-1,0,0,0,0,1],
              [-1,0,0,0,-1,0,0,0,-1],
              [0,1,0,-1,1,0,0,0,-1],
              [1,-1,0,1,0,0,0,0,-1]):
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
# Space group 148 ('R -3 :H'): the six rotations of 'P -3' repeated for the
# translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3) — the centring
# shifts visible in the original generated stanzas.  Append order matches
# the generator: all rotations per translation.
transformations = []
for num, den in (([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])):
    for elems in ([1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1],
                  [-1,0,0,0,-1,0,0,0,-1],
                  [0,1,0,-1,1,0,0,0,-1],
                  [1,-1,0,1,0,0,0,0,-1]):
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
# Space group 149 ('P 3 1 2'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
# Space group 150 ('P 3 2 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
# Space group 151 ('P 31 1 2'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
# Space group 152 ('P 31 2 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
# Space group 153 ('P 32 1 2'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
# Space group 154 ('P 32 2 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
# Space group 155 ('R 3 2 :H'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Space group 156 ('P 3 m 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Space group 157 ('P 3 1 m'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Space group 158 ('P 3 c 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Space group 159 ('P 3 1 c'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Space group 160 ('R 3 m :H'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161 ('R 3 c :H'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,7], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,5], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,5], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,5], [3,3,6]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162 ('P -3 1 m'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163 ('P -3 1 c'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
# NOTE: negative numerators (e.g. [0,0,-1]/[1,1,2]) are legitimate -1/2 shifts.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164 ('P -3 m 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165 ('P -3 c 1'): generated symmetry-operation table.
# Each entry is (flat 3x3 rotation matrix, translation numerators,
# translation denominators); translation = num/den in fractional coordinates.
# NOTE: negative numerators (e.g. [0,0,-1]/[1,1,2]) are legitimate -1/2 shifts.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166 (R -3 m :H).  Each entry is a (rotation, translation
# numerator, translation denominator) triple; the same 12 rotations are
# repeated with the translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3).
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
        ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
        ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
        ([-1,0,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
        ([0,1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
        ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
        ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
        ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
        ([-1,1,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
        ([1,0,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
        ([0,-1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
        ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
        ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
        ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
        ([-1,0,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
        ([0,1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
        ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
        ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
        ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
        ([-1,1,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
        ([1,0,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
        ([0,-1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167 (R -3 c :H).  Each entry is a (rotation, translation
# numerator, translation denominator) triple.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
        ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
        ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
        ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
        ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
        ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [1,2,7], [3,3,6]),
        ([-1,0,0,-1,1,0,0,0,-1], [1,2,7], [3,3,6]),
        ([0,1,0,1,0,0,0,0,-1], [1,2,7], [3,3,6]),
        ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
        ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
        ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
        ([-1,1,0,0,1,0,0,0,1], [1,2,1], [3,3,6]),
        ([1,0,0,1,-1,0,0,0,1], [1,2,1], [3,3,6]),
        ([0,-1,0,-1,0,0,0,0,1], [1,2,1], [3,3,6]),
        ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
        ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
        ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [2,1,5], [3,3,6]),
        ([-1,0,0,-1,1,0,0,0,-1], [2,1,5], [3,3,6]),
        ([0,1,0,1,0,0,0,0,-1], [2,1,5], [3,3,6]),
        ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
        ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
        ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
        ([-1,1,0,0,1,0,0,0,1], [2,1,-1], [3,3,6]),
        ([1,0,0,1,-1,0,0,0,1], [2,1,-1], [3,3,6]),
        ([0,-1,0,-1,0,0,0,0,1], [2,1,-1], [3,3,6]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168 (P 6): six rotations, all with zero translation.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169 (P 61): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
# Space group 170 (P 65): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
# Space group 171 (P 62): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
# Space group 172 (P 64): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
# Space group 173 (P 63): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
# Space group 174 (P -6): six rotations, all with zero translation.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
# Space group 175 (P 6/m): twelve rotations, all with zero translation.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
# Space group 176 (P 63/m): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
        ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
        ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
# Space group 177 (P 6 2 2): twelve rotations, all with zero translation.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
# Space group 178 (P 61 2 2): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,5], [1,1,6]),
        ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,6]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
# Space group 179 (P 65 2 2): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,6]),
        ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([1,0,0,1,-1,0,0,0,-1], [0,0,5], [1,1,6]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
# Space group 180 (P 62 2 2): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
        ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,3]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
# Space group 181 (P 64 2 2): (rotation, translation numerator, translation
# denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
        ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
        ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
        ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
        ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
        ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,1,-1,0,0,0,-1], [0,0,2], [1,1,3]),
        ]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
# Space group 182 (P 63 2 2): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
# Space group 183 (P 6 m m): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
# This symmorphic group has no fractional translations (all are 0/1).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
# Space group 184 (P 6 c c): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
# Space group 185 (P 63 c m): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
# Space group 186 (P 63 m c): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
# Space group 187 (P -6 m 2): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
# Symmorphic group: every translation is zero.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
# Space group 188 (P -6 c 2): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
# Space group 189 (P -6 2 m): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
# Symmorphic group: every translation is zero.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
# Space group 190 (P -6 2 c): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
# Space group 191 (P 6/m m m, order 24): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
# Symmorphic group: every translation is zero.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
# Space group 192 (P 6/m c c, order 24): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
# Note some numerators are negative (e.g. [0,0,-1]/[1,1,2]), exactly as
# tabulated; they are reproduced verbatim.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
# Space group 193 (P 63/m c m, order 24): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
# Negative numerators (e.g. [0,0,-1]/[1,1,2]) are reproduced verbatim
# from the tabulated operations.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,0],  [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
# Space group 194 (P 63/m m c), hexagonal.  Each operation is stored as
# (rotation matrix, translation numerator, translation denominator); the
# fractional translation is num/den component-wise.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
# Space group 195 (P 2 3), cubic: the 12 rotations of point group T,
# all with zero translation.  Each entry is
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
# Space group 196 (F 2 3), face-centred cubic: the 12 rotations of point
# group T combined with the four F-centring translations
# (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0).
# Ordering matches the generated original: translation-major,
# rotation-minor.  Each entry is
# (rotation matrix, translation numerator, translation denominator).
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for elems in [
        [1,0,0,0,1,0,0,0,1],
        [0,0,1,1,0,0,0,1,0],
        [0,1,0,0,0,1,1,0,0],
        [0,-1,0,0,0,-1,1,0,0],
        [0,0,1,-1,0,0,0,-1,0],
        [0,-1,0,0,0,1,-1,0,0],
        [0,0,-1,-1,0,0,0,1,0],
        [0,0,-1,1,0,0,0,-1,0],
        [0,1,0,0,0,-1,-1,0,0],
        [1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,-1],
        [-1,0,0,0,-1,0,0,0,1],
    ]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
# Space group 197 (I 2 3), body-centred cubic: the 12 rotations of point
# group T combined with the two I-centring translations (0,0,0) and
# (1/2,1/2,1/2).  Ordering matches the generated original:
# translation-major, rotation-minor.
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([1,1,1], [2,2,2]),
]:
    for elems in [
        [1,0,0,0,1,0,0,0,1],
        [0,0,1,1,0,0,0,1,0],
        [0,1,0,0,0,1,1,0,0],
        [0,-1,0,0,0,-1,1,0,0],
        [0,0,1,-1,0,0,0,-1,0],
        [0,-1,0,0,0,1,-1,0,0],
        [0,0,-1,-1,0,0,0,1,0],
        [0,0,-1,1,0,0,0,-1,0],
        [0,1,0,0,0,-1,-1,0,0],
        [1,0,0,0,-1,0,0,0,-1],
        [-1,0,0,0,1,0,0,0,-1],
        [-1,0,0,0,-1,0,0,0,1],
    ]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
# Space group 198 (P 21 3), cubic with 2_1 screw axes: 12 operations,
# each stored as (rotation matrix, translation numerator, translation
# denominator) with the fractional translation given as num/den.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
# Space group 199 (I 21 3), body-centred cubic with 2_1 screw axes:
# 24 operations stored as (rotation matrix, translation numerator,
# translation denominator).  The second dozen are the first dozen shifted
# by the I-centring vector (1/2,1/2,1/2), exactly as in the generated
# original (including its num/den normalisation).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],  [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0],  [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0],  [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0],    [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0],    [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
# Space group 200 (P m -3), cubic: the 24 rotations of point group Th
# (the 12 proper rotations of T followed by their inversions), all with
# zero translation.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
# Space group 201 (P n -3, origin choice 2), cubic: 24 operations stored
# as (rotation matrix, translation numerator, translation denominator).
# The second dozen are the inversions of the first with negated
# fractional translations, reproduced exactly as in the generated
# original (including the negative numerators).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0],    [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0],    [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,1,0],    [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],  [0,1,1],    [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,0,1],    [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,1,0],    [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,0,1],    [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0],    [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0],    [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0],   [-1,-1,0],  [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0],   [0,-1,-1],  [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0],   [-1,0,-1],  [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0],   [-1,-1,0],  [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0],   [-1,0,-1],  [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0],   [0,-1,-1],  [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
# Space group 202 (F m -3).  The generated table is exactly the 24 rotations
# of point group m-3 repeated under the four face-centering translations
# (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), in that order
# (outer loop over centerings, inner loop over rotations), giving the
# full set of 96 symmetry operations.
transformations = []
_m3_rotations = [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (0, 0, 1, 1, 0, 0, 0, 1, 0),
    (0, 1, 0, 0, 0, 1, 1, 0, 0),
    (0, -1, 0, 0, 0, -1, 1, 0, 0),
    (0, 0, 1, -1, 0, 0, 0, -1, 0),
    (0, -1, 0, 0, 0, 1, -1, 0, 0),
    (0, 0, -1, -1, 0, 0, 0, 1, 0),
    (0, 0, -1, 1, 0, 0, 0, -1, 0),
    (0, 1, 0, 0, 0, -1, -1, 0, 0),
    (1, 0, 0, 0, -1, 0, 0, 0, -1),
    (-1, 0, 0, 0, 1, 0, 0, 0, -1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (-1, 0, 0, 0, -1, 0, 0, 0, -1),
    (0, 0, -1, -1, 0, 0, 0, -1, 0),
    (0, -1, 0, 0, 0, -1, -1, 0, 0),
    (0, 1, 0, 0, 0, 1, -1, 0, 0),
    (0, 0, -1, 1, 0, 0, 0, 1, 0),
    (0, 1, 0, 0, 0, -1, 1, 0, 0),
    (0, 0, 1, 1, 0, 0, 0, -1, 0),
    (0, 0, 1, -1, 0, 0, 0, 1, 0),
    (0, -1, 0, 0, 0, 1, 1, 0, 0),
    (-1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, -1, 0, 0, 0, 1),
    (1, 0, 0, 0, 1, 0, 0, 0, -1),
]
for _num, _den in [((0, 0, 0), (1, 1, 1)), ((0, 1, 1), (1, 2, 2)),
                   ((1, 0, 1), (2, 1, 2)), ((1, 1, 0), (2, 2, 1))]:
    for _elems in _m3_rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        # Fresh translation arrays per operation, matching the original
        # table's one-N.array-per-transformation construction.
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
# Space group 203 (F d -3, origin choice 2).  The generated table consists
# of 24 representative operations (rotation plus fractional translation)
# repeated under the four F-lattice centering translations; the centred
# translations are the exact reduced-fraction sums of base + centering,
# which reproduces the numerator/denominator pairs of the original table.
def _frac_add(n1, d1, n2, d2):
    # Return n1/d1 + n2/d2 as a reduced (numerator, denominator) pair
    # with a positive denominator (0 is represented as 0/1).
    n = n1 * d2 + n2 * d1
    d = d1 * d2
    a, b = abs(n), d
    while b:  # Euclid's algorithm; gcd(0, d) == d reduces 0/d to 0/1
        a, b = b, a % b
    return n // a, d // a
transformations = []
_base_ops_203 = [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 0, 1, 1, 0, 0, 0, 1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, 1, 1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, -1, 1, 0, 0), (1, 1, 0), (4, 4, 1)),
    ((0, 0, 1, -1, 0, 0, 0, -1, 0), (0, 1, 1), (1, 4, 4)),
    ((0, -1, 0, 0, 0, 1, -1, 0, 0), (1, 0, 1), (4, 1, 4)),
    ((0, 0, -1, -1, 0, 0, 0, 1, 0), (1, 1, 0), (4, 4, 1)),
    ((0, 0, -1, 1, 0, 0, 0, -1, 0), (1, 0, 1), (4, 1, 4)),
    ((0, 1, 0, 0, 0, -1, -1, 0, 0), (0, 1, 1), (1, 4, 4)),
    ((1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 1, 1), (1, 4, 4)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 0, 1), (4, 1, 4)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (4, 4, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, 0, -1, -1, 0, 0, 0, -1, 0), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 0, 0, -1, -1, 0, 0), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 0, 0, 1, -1, 0, 0), (-1, -1, 0), (4, 4, 1)),
    ((0, 0, -1, 1, 0, 0, 0, 1, 0), (0, -1, -1), (1, 4, 4)),
    ((0, 1, 0, 0, 0, -1, 1, 0, 0), (-1, 0, -1), (4, 1, 4)),
    ((0, 0, 1, 1, 0, 0, 0, -1, 0), (-1, -1, 0), (4, 4, 1)),
    ((0, 0, 1, -1, 0, 0, 0, 1, 0), (-1, 0, -1), (4, 1, 4)),
    ((0, -1, 0, 0, 0, 1, 1, 0, 0), (0, -1, -1), (1, 4, 4)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, -1, -1), (1, 4, 4)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (-1, 0, -1), (4, 1, 4)),
    ((1, 0, 0, 0, 1, 0, 0, 0, -1), (-1, -1, 0), (4, 4, 1)),
]
for _cnum, _cden in [((0, 0, 0), (1, 1, 1)), ((0, 1, 1), (1, 2, 2)),
                     ((1, 0, 1), (2, 1, 2)), ((1, 1, 0), (2, 2, 1))]:
    for _elems, _bnum, _bden in _base_ops_203:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        _fracs = [_frac_add(_bnum[i], _bden[i], _cnum[i], _cden[i])
                  for i in range(3)]
        trans_num = N.array([f[0] for f in _fracs])
        trans_den = N.array([f[1] for f in _fracs])
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
# Space group 204 (I m -3, body-centred cubic).  The 24 rotation matrices of
# point group m-3 are each combined with the two I-centring translations
# (0,0,0) and (1/2,1/2,1/2); translations are kept as separate integer
# numerator/denominator arrays, matching the rest of this table.
transformations = []
_m3_rotations = [
    (1,0,0, 0,1,0, 0,0,1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
    (-1,0,0, 0,-1,0, 0,0,-1),
    (0,0,-1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,-1, -1,0,0),
    (0,1,0, 0,0,1, -1,0,0),
    (0,0,-1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,-1, 1,0,0),
    (0,0,1, 1,0,0, 0,-1,0),
    (0,0,1, -1,0,0, 0,1,0),
    (0,-1,0, 0,0,1, 1,0,0),
    (-1,0,0, 0,1,0, 0,0,1),
    (1,0,0, 0,-1,0, 0,0,1),
    (1,0,0, 0,1,0, 0,0,-1),
]
# All zero-shift operations first, then the centred copies (same order as
# the generated original).
for num, den in (((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))):
    for elements in _m3_rotations:
        rot = N.array(elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
# Space group 205 (P a -3, primitive cubic).  Each entry pairs a 3x3
# rotation matrix (row-major element tuple) with its glide/screw translation
# given as integer numerators and denominators.
transformations = []
for elements, num, den in [
    ((1,0,0, 0,1,0, 0,0,1),    (0,0,0),   (1,1,1)),
    ((0,0,1, 1,0,0, 0,1,0),    (0,0,0),   (1,1,1)),
    ((0,1,0, 0,0,1, 1,0,0),    (0,0,0),   (1,1,1)),
    ((0,-1,0, 0,0,-1, 1,0,0),  (1,0,1),   (2,1,2)),
    ((0,0,1, -1,0,0, 0,-1,0),  (1,1,0),   (2,2,1)),
    ((0,-1,0, 0,0,1, -1,0,0),  (0,1,1),   (1,2,2)),
    ((0,0,-1, -1,0,0, 0,1,0),  (1,0,1),   (2,1,2)),
    ((0,0,-1, 1,0,0, 0,-1,0),  (0,1,1),   (1,2,2)),
    ((0,1,0, 0,0,-1, -1,0,0),  (1,1,0),   (2,2,1)),
    ((1,0,0, 0,-1,0, 0,0,-1),  (1,1,0),   (2,2,1)),
    ((-1,0,0, 0,1,0, 0,0,-1),  (0,1,1),   (1,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (1,0,1),   (2,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (0,0,0),   (1,1,1)),
    ((0,0,-1, -1,0,0, 0,-1,0), (0,0,0),   (1,1,1)),
    ((0,-1,0, 0,0,-1, -1,0,0), (0,0,0),   (1,1,1)),
    ((0,1,0, 0,0,1, -1,0,0),   (-1,0,-1), (2,1,2)),
    ((0,0,-1, 1,0,0, 0,1,0),   (-1,-1,0), (2,2,1)),
    ((0,1,0, 0,0,-1, 1,0,0),   (0,-1,-1), (1,2,2)),
    ((0,0,1, 1,0,0, 0,-1,0),   (-1,0,-1), (2,1,2)),
    ((0,0,1, -1,0,0, 0,1,0),   (0,-1,-1), (1,2,2)),
    ((0,-1,0, 0,0,1, 1,0,0),   (-1,-1,0), (2,2,1)),
    ((-1,0,0, 0,1,0, 0,0,1),   (-1,-1,0), (2,2,1)),
    ((1,0,0, 0,-1,0, 0,0,1),   (0,-1,-1), (1,2,2)),
    ((1,0,0, 0,1,0, 0,0,-1),   (-1,0,-1), (2,1,2)),
]:
    rot = N.array(elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
# Space group 206 (I a -3, body-centred cubic).  48 symmetry operations:
# the 24 P a -3-like operations followed by their I-centred copies.  The
# centred translations are listed exactly as generated (unreduced, e.g.
# numerator 1 over denominator 1), so they are enumerated explicitly
# rather than derived.
transformations = []
for elements, num, den in [
    # primitive part
    ((1,0,0, 0,1,0, 0,0,1),    (0,0,0),  (1,1,1)),
    ((0,0,1, 1,0,0, 0,1,0),    (0,0,0),  (1,1,1)),
    ((0,1,0, 0,0,1, 1,0,0),    (0,0,0),  (1,1,1)),
    ((0,-1,0, 0,0,-1, 1,0,0),  (0,1,0),  (1,2,1)),
    ((0,0,1, -1,0,0, 0,-1,0),  (0,0,1),  (1,1,2)),
    ((0,-1,0, 0,0,1, -1,0,0),  (1,0,0),  (2,1,1)),
    ((0,0,-1, -1,0,0, 0,1,0),  (0,1,0),  (1,2,1)),
    ((0,0,-1, 1,0,0, 0,-1,0),  (1,0,0),  (2,1,1)),
    ((0,1,0, 0,0,-1, -1,0,0),  (0,0,1),  (1,1,2)),
    ((1,0,0, 0,-1,0, 0,0,-1),  (0,0,1),  (1,1,2)),
    ((-1,0,0, 0,1,0, 0,0,-1),  (1,0,0),  (2,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (0,1,0),  (1,2,1)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (0,0,0),  (1,1,1)),
    ((0,0,-1, -1,0,0, 0,-1,0), (0,0,0),  (1,1,1)),
    ((0,-1,0, 0,0,-1, -1,0,0), (0,0,0),  (1,1,1)),
    ((0,1,0, 0,0,1, -1,0,0),   (0,-1,0), (1,2,1)),
    ((0,0,-1, 1,0,0, 0,1,0),   (0,0,-1), (1,1,2)),
    ((0,1,0, 0,0,-1, 1,0,0),   (-1,0,0), (2,1,1)),
    ((0,0,1, 1,0,0, 0,-1,0),   (0,-1,0), (1,2,1)),
    ((0,0,1, -1,0,0, 0,1,0),   (-1,0,0), (2,1,1)),
    ((0,-1,0, 0,0,1, 1,0,0),   (0,0,-1), (1,1,2)),
    ((-1,0,0, 0,1,0, 0,0,1),   (0,0,-1), (1,1,2)),
    ((1,0,0, 0,-1,0, 0,0,1),   (-1,0,0), (2,1,1)),
    ((1,0,0, 0,1,0, 0,0,-1),   (0,-1,0), (1,2,1)),
    # body-centred part
    ((1,0,0, 0,1,0, 0,0,1),    (1,1,1),  (2,2,2)),
    ((0,0,1, 1,0,0, 0,1,0),    (1,1,1),  (2,2,2)),
    ((0,1,0, 0,0,1, 1,0,0),    (1,1,1),  (2,2,2)),
    ((0,-1,0, 0,0,-1, 1,0,0),  (1,1,1),  (2,1,2)),
    ((0,0,1, -1,0,0, 0,-1,0),  (1,1,1),  (2,2,1)),
    ((0,-1,0, 0,0,1, -1,0,0),  (1,1,1),  (1,2,2)),
    ((0,0,-1, -1,0,0, 0,1,0),  (1,1,1),  (2,1,2)),
    ((0,0,-1, 1,0,0, 0,-1,0),  (1,1,1),  (1,2,2)),
    ((0,1,0, 0,0,-1, -1,0,0),  (1,1,1),  (2,2,1)),
    ((1,0,0, 0,-1,0, 0,0,-1),  (1,1,1),  (2,2,1)),
    ((-1,0,0, 0,1,0, 0,0,-1),  (1,1,1),  (1,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (1,1,1),  (2,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (1,1,1),  (2,2,2)),
    ((0,0,-1, -1,0,0, 0,-1,0), (1,1,1),  (2,2,2)),
    ((0,-1,0, 0,0,-1, -1,0,0), (1,1,1),  (2,2,2)),
    ((0,1,0, 0,0,1, -1,0,0),   (1,0,1),  (2,1,2)),
    ((0,0,-1, 1,0,0, 0,1,0),   (1,1,0),  (2,2,1)),
    ((0,1,0, 0,0,-1, 1,0,0),   (0,1,1),  (1,2,2)),
    ((0,0,1, 1,0,0, 0,-1,0),   (1,0,1),  (2,1,2)),
    ((0,0,1, -1,0,0, 0,1,0),   (0,1,1),  (1,2,2)),
    ((0,-1,0, 0,0,1, 1,0,0),   (1,1,0),  (2,2,1)),
    ((-1,0,0, 0,1,0, 0,0,1),   (1,1,0),  (2,2,1)),
    ((1,0,0, 0,-1,0, 0,0,1),   (0,1,1),  (1,2,2)),
    ((1,0,0, 0,1,0, 0,0,-1),   (1,0,1),  (2,1,2)),
]:
    rot = N.array(elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
# Space group 207 (P 4 3 2, primitive cubic).  The 24 proper rotations of
# point group 432, all with zero translation.
transformations = []
for elements in [
    (1,0,0, 0,1,0, 0,0,1),
    (1,0,0, 0,0,-1, 0,1,0),
    (1,0,0, 0,0,1, 0,-1,0),
    (0,0,1, 0,1,0, -1,0,0),
    (0,0,-1, 0,1,0, 1,0,0),
    (0,-1,0, 1,0,0, 0,0,1),
    (0,1,0, -1,0,0, 0,0,1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
    (0,1,0, 1,0,0, 0,0,-1),
    (0,-1,0, -1,0,0, 0,0,-1),
    (0,0,1, 0,-1,0, 1,0,0),
    (0,0,-1, 0,-1,0, -1,0,0),
    (-1,0,0, 0,0,1, 0,1,0),
    (-1,0,0, 0,0,-1, 0,-1,0),
]:
    rot = N.array(elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
# Space group 208 (P 42 3 2, primitive cubic).  Same 24 rotations as
# P 4 3 2, but the four-fold and diagonal two-fold operations carry a
# (1/2,1/2,1/2) screw translation.
transformations = []
_no_shift = ((0,0,0), (1,1,1))
_half_shift = ((1,1,1), (2,2,2))
for elements, (num, den) in [
    ((1,0,0, 0,1,0, 0,0,1),    _no_shift),
    ((1,0,0, 0,0,-1, 0,1,0),   _half_shift),
    ((1,0,0, 0,0,1, 0,-1,0),   _half_shift),
    ((0,0,1, 0,1,0, -1,0,0),   _half_shift),
    ((0,0,-1, 0,1,0, 1,0,0),   _half_shift),
    ((0,-1,0, 1,0,0, 0,0,1),   _half_shift),
    ((0,1,0, -1,0,0, 0,0,1),   _half_shift),
    ((0,0,1, 1,0,0, 0,1,0),    _no_shift),
    ((0,1,0, 0,0,1, 1,0,0),    _no_shift),
    ((0,-1,0, 0,0,-1, 1,0,0),  _no_shift),
    ((0,0,1, -1,0,0, 0,-1,0),  _no_shift),
    ((0,-1,0, 0,0,1, -1,0,0),  _no_shift),
    ((0,0,-1, -1,0,0, 0,1,0),  _no_shift),
    ((0,0,-1, 1,0,0, 0,-1,0),  _no_shift),
    ((0,1,0, 0,0,-1, -1,0,0),  _no_shift),
    ((1,0,0, 0,-1,0, 0,0,-1),  _no_shift),
    ((-1,0,0, 0,1,0, 0,0,-1),  _no_shift),
    ((-1,0,0, 0,-1,0, 0,0,1),  _no_shift),
    ((0,1,0, 1,0,0, 0,0,-1),   _half_shift),
    ((0,-1,0, -1,0,0, 0,0,-1), _half_shift),
    ((0,0,1, 0,-1,0, 1,0,0),   _half_shift),
    ((0,0,-1, 0,-1,0, -1,0,0), _half_shift),
    ((-1,0,0, 0,0,1, 0,1,0),   _half_shift),
    ((-1,0,0, 0,0,-1, 0,-1,0), _half_shift),
]:
    rot = N.array(elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# Remaining symmetry operations of space group 209 (F 4 3 2), appended to
# the `transformations` list begun before this chunk.
# NOTE(review): the lines preceding this chunk presumably appended the
# identity rotation with zero translation — the first centring set below
# therefore starts at the second rotation; confirm against the lines above.
#
# The 24 proper rotations of point group 432, each a 3x3 integer matrix
# flattened row-major to 9 entries, in this file's canonical order.
_rot432 = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [1, 0, 0, 0, 0, -1, 0, 1, 0],
    [1, 0, 0, 0, 0, 1, 0, -1, 0],
    [0, 0, 1, 0, 1, 0, -1, 0, 0],
    [0, 0, -1, 0, 1, 0, 1, 0, 0],
    [0, -1, 0, 1, 0, 0, 0, 0, 1],
    [0, 1, 0, -1, 0, 0, 0, 0, 1],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, -1, 0, 0, 0, -1, 1, 0, 0],
    [0, 0, 1, -1, 0, 0, 0, -1, 0],
    [0, -1, 0, 0, 0, 1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, 1, 0],
    [0, 0, -1, 1, 0, 0, 0, -1, 0],
    [0, 1, 0, 0, 0, -1, -1, 0, 0],
    [1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
    [0, 1, 0, 1, 0, 0, 0, 0, -1],
    [0, -1, 0, -1, 0, 0, 0, 0, -1],
    [0, 0, 1, 0, -1, 0, 1, 0, 0],
    [0, 0, -1, 0, -1, 0, -1, 0, 0],
    [-1, 0, 0, 0, 0, 1, 0, 1, 0],
    [-1, 0, 0, 0, 0, -1, 0, -1, 0],
]
# Face-centring translations as (numerators, denominators, start index).
# The first (zero) centring starts at index 1 because its identity
# operation was already appended above this chunk.
for _num, _den, _start in (
        ((0, 0, 0), (1, 1, 1), 1),
        ((0, 1, 1), (1, 2, 2), 0),
        ((1, 0, 1), (2, 1, 2), 0),
        ((1, 1, 0), (2, 2, 1), 0)):
    for _r in _rot432[_start:]:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
# Space group 210 (F 41 3 2): the 24 rotations of point group 432.  Some
# operations carry an intrinsic (1/4,1/4,1/4) screw shift; every operation
# is repeated for the four face-centring translations, with the centring
# folded into the stored numerator/denominator pairs.
transformations = []
# The 24 rotations, flattened row-major, in this file's canonical order.
_rot432 = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [1, 0, 0, 0, 0, -1, 0, 1, 0],
    [1, 0, 0, 0, 0, 1, 0, -1, 0],
    [0, 0, 1, 0, 1, 0, -1, 0, 0],
    [0, 0, -1, 0, 1, 0, 1, 0, 0],
    [0, -1, 0, 1, 0, 0, 0, 0, 1],
    [0, 1, 0, -1, 0, 0, 0, 0, 1],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, -1, 0, 0, 0, -1, 1, 0, 0],
    [0, 0, 1, -1, 0, 0, 0, -1, 0],
    [0, -1, 0, 0, 0, 1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, 1, 0],
    [0, 0, -1, 1, 0, 0, 0, -1, 0],
    [0, 1, 0, 0, 0, -1, -1, 0, 0],
    [1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
    [0, 1, 0, 1, 0, 0, 0, 0, -1],
    [0, -1, 0, -1, 0, 0, 0, 0, -1],
    [0, 0, 1, 0, -1, 0, 1, 0, 0],
    [0, 0, -1, 0, -1, 0, -1, 0, 0],
    [-1, 0, 0, 0, 0, 1, 0, 1, 0],
    [-1, 0, 0, 0, 0, -1, 0, -1, 0],
]
# 1 marks the rotations whose operation carries the (1/4,1/4,1/4) shift.
_quarter = (0, 1, 1, 1, 1, 1, 1,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            1, 1, 1, 1, 1, 1)
# Per centring: ((num, den) without shift, (num, den) with the shift),
# i.e. centring translation and centring + (1/4,1/4,1/4), reduced.
for _plain, _shifted in (
        (((0, 0, 0), (1, 1, 1)), ((1, 1, 1), (4, 4, 4))),
        (((0, 1, 1), (1, 2, 2)), ((1, 3, 3), (4, 4, 4))),
        (((1, 0, 1), (2, 1, 2)), ((3, 1, 3), (4, 4, 4))),
        (((1, 1, 0), (2, 2, 1)), ((3, 3, 1), (4, 4, 4)))):
    for _r, _q in zip(_rot432, _quarter):
        _num, _den = _shifted if _q else _plain
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
# Beginning of the next space group's operations (presumably 211, 'I 4 3 2'
# — the SpaceGroup constructor call lies beyond this chunk; TODO confirm):
# the 24 rotations of point group 432 at zero translation, followed by the
# first operation of the (1/2,1/2,1/2) body-centring copies.  The rest of
# the centring set continues after this chunk.
transformations = []
# The 24 rotations, flattened row-major, in this file's canonical order.
_rot432 = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [1, 0, 0, 0, 0, -1, 0, 1, 0],
    [1, 0, 0, 0, 0, 1, 0, -1, 0],
    [0, 0, 1, 0, 1, 0, -1, 0, 0],
    [0, 0, -1, 0, 1, 0, 1, 0, 0],
    [0, -1, 0, 1, 0, 0, 0, 0, 1],
    [0, 1, 0, -1, 0, 0, 0, 0, 1],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, -1, 0, 0, 0, -1, 1, 0, 0],
    [0, 0, 1, -1, 0, 0, 0, -1, 0],
    [0, -1, 0, 0, 0, 1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, 1, 0],
    [0, 0, -1, 1, 0, 0, 0, -1, 0],
    [0, 1, 0, 0, 0, -1, -1, 0, 0],
    [1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
    [0, 1, 0, 1, 0, 0, 0, 0, -1],
    [0, -1, 0, -1, 0, 0, 0, 0, -1],
    [0, 0, 1, 0, -1, 0, 1, 0, 0],
    [0, 0, -1, 0, -1, 0, -1, 0, 0],
    [-1, 0, 0, 0, 0, 1, 0, 1, 0],
    [-1, 0, 0, 0, 0, -1, 0, -1, 0],
]
for _r in _rot432:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
# First operation of the (1/2,1/2,1/2) body-centring set: the identity.
rot = N.array([1, 0, 0, 0, 1, 0, 0, 0, 1])
rot.shape = (3, 3)
trans_num = N.array([1, 1, 1])
trans_den = N.array([2, 2, 2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
# Space group No. 212 (P 43 3 2): 24 symmetry operations.  Each row of the
# table below is (rotation matrix as a flat 9-element list, translation
# numerator, translation denominator); the fractional translation of the
# operation is num/den per axis.
transformations = []
_ops_212 = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0],   [3,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0],   [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0],   [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0],   [3,1,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1],   [3,1,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [3,3,1], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0],   [3,3,1], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0],   [3,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [4,4,4]),
]
for _r, _tn, _td in _ops_212:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
# Space group No. 213 (P 41 3 2): 24 symmetry operations.  Same rotation
# set as space group 212 above, but with different fractional translations.
# Each row is (flat 3x3 rotation, translation numerator, denominator).
transformations = []
_ops_213 = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0],   [1,1,3], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0],   [3,1,1], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0],   [3,1,1], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0],   [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1],   [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [3,1,1], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0],   [1,1,3], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [3,3,3], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0],   [1,3,1], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [3,3,3], [4,4,4]),
]
for _r, _tn, _td in _ops_213:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
# Space group No. 214 (I 41 3 2): 48 symmetry operations.  Rows 25-48
# repeat the first 24 rotations with the translation shifted by
# (1/2, 1/2, 1/2) — presumably the body-centring of the 'I' lattice.  The
# original generated code stores the shifted translations unreduced (e.g.
# 5/4, or numerator 1 over denominator 1), so the exact num/den pairs are
# kept verbatim here.  Each row is (flat 3x3 rotation, numerator,
# denominator).
transformations = []
_ops_214 = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0],   [1,1,3], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0],   [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0],   [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0],   [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1],   [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],  [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0],  [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0],  [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0],   [1,1,3], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0],   [1,3,1], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,1,0],   [3,3,5], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0],   [3,5,5], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0],   [3,5,5], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0],   [3,5,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1],   [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1],   [3,3,5], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0],    [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0],    [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],  [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1],   [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0],   [3,3,5], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [3,3,3], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0],   [3,5,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [3,3,3], [4,4,4]),
]
for _r, _tn, _td in _ops_214:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
# Space group No. 215 (P -4 3 m): 24 operations, all purely rotational
# (every translation is zero), so only the rotation matrices need to be
# tabulated.  Each stored entry is (3x3 integer rotation matrix,
# translation numerator, translation denominator).
transformations = []
_rots_215 = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
for _r in _rots_215:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
# Start of the next group's operation list (its SpaceGroup registration
# lies beyond this chunk).  The 24 point-group rotations are identical to
# those of P -4 3 m above; they are appended first with zero translation,
# then re-appended combined with the translation (0, 1/2, 1/2) —
# presumably a face-centring vector.  NOTE(review): the generated sequence
# continues after this chunk; only the first 23 of the (0,1/2,1/2) copies
# are completed here, exactly matching the original line range.
transformations = []
_rots_216 = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
# All 24 rotations with zero translation.
for _r in _rots_216:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# First 23 rotations combined with the (0, 1/2, 1/2) translation; the
# remaining operations follow after this chunk in the generated file.
for _r in _rots_216[:23]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,1,1])
    trans_den = N.array([1,2,2])
    transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
# Space group No. 217, 'I -4 3 m'.
# The original generated table lists 48 stanzas: the same 24 rotation
# matrices appear twice, first with translation numerator/denominator
# (0,0,0)/(1,1,1) and then with (1,1,1)/(2,2,2).  Build the identical
# (rot, trans_num, trans_den) tuples, in the identical order, from a
# single data table instead of 240 repeated statements.
transformations = []
for trans_num_list, trans_den_list in (([0, 0, 0], [1, 1, 1]),
                                       ([1, 1, 1], [2, 2, 2])):
    for rot_elements in ([1, 0, 0, 0, 1, 0, 0, 0, 1],
                         [-1, 0, 0, 0, 0, 1, 0, -1, 0],
                         [-1, 0, 0, 0, 0, -1, 0, 1, 0],
                         [0, 0, -1, 0, -1, 0, 1, 0, 0],
                         [0, 0, 1, 0, -1, 0, -1, 0, 0],
                         [0, 1, 0, -1, 0, 0, 0, 0, -1],
                         [0, -1, 0, 1, 0, 0, 0, 0, -1],
                         [0, 0, 1, 1, 0, 0, 0, 1, 0],
                         [0, 1, 0, 0, 0, 1, 1, 0, 0],
                         [0, -1, 0, 0, 0, -1, 1, 0, 0],
                         [0, 0, 1, -1, 0, 0, 0, -1, 0],
                         [0, -1, 0, 0, 0, 1, -1, 0, 0],
                         [0, 0, -1, -1, 0, 0, 0, 1, 0],
                         [0, 0, -1, 1, 0, 0, 0, -1, 0],
                         [0, 1, 0, 0, 0, -1, -1, 0, 0],
                         [1, 0, 0, 0, -1, 0, 0, 0, -1],
                         [-1, 0, 0, 0, 1, 0, 0, 0, -1],
                         [-1, 0, 0, 0, -1, 0, 0, 0, 1],
                         [0, -1, 0, -1, 0, 0, 0, 0, 1],
                         [0, 1, 0, 1, 0, 0, 0, 0, 1],
                         [0, 0, -1, 0, 1, 0, -1, 0, 0],
                         [0, 0, 1, 0, 1, 0, 1, 0, 0],
                         [1, 0, 0, 0, 0, -1, 0, -1, 0],
                         [1, 0, 0, 0, 0, 1, 0, 1, 0]):
        # Fresh arrays per operation, exactly as the unrolled original did.
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(trans_num_list)
        trans_den = N.array(trans_den_list)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
# Space group No. 218, 'P -4 3 n'.
# 24 symmetry operations; each entry below is the flattened 3x3 rotation
# matrix followed by the translation's integer numerator and denominator
# triples.  The tuples are built in the same order, with the same fresh
# N.array objects, as the unrolled generated original.
transformations = []
for rot_elements, num_list, den_list in (
        ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
        ([-1, 0, 0, 0, 0, 1, 0, -1, 0], [1, 1, 1], [2, 2, 2]),
        ([-1, 0, 0, 0, 0, -1, 0, 1, 0], [1, 1, 1], [2, 2, 2]),
        ([0, 0, -1, 0, -1, 0, 1, 0, 0], [1, 1, 1], [2, 2, 2]),
        ([0, 0, 1, 0, -1, 0, -1, 0, 0], [1, 1, 1], [2, 2, 2]),
        ([0, 1, 0, -1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
        ([0, -1, 0, 1, 0, 0, 0, 0, -1], [1, 1, 1], [2, 2, 2]),
        ([0, 0, 1, 1, 0, 0, 0, 1, 0], [0, 0, 0], [1, 1, 1]),
        ([0, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0], [1, 1, 1]),
        ([0, -1, 0, 0, 0, -1, 1, 0, 0], [0, 0, 0], [1, 1, 1]),
        ([0, 0, 1, -1, 0, 0, 0, -1, 0], [0, 0, 0], [1, 1, 1]),
        ([0, -1, 0, 0, 0, 1, -1, 0, 0], [0, 0, 0], [1, 1, 1]),
        ([0, 0, -1, -1, 0, 0, 0, 1, 0], [0, 0, 0], [1, 1, 1]),
        ([0, 0, -1, 1, 0, 0, 0, -1, 0], [0, 0, 0], [1, 1, 1]),
        ([0, 1, 0, 0, 0, -1, -1, 0, 0], [0, 0, 0], [1, 1, 1]),
        ([1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
        ([-1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
        ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
        ([0, -1, 0, -1, 0, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
        ([0, 1, 0, 1, 0, 0, 0, 0, 1], [1, 1, 1], [2, 2, 2]),
        ([0, 0, -1, 0, 1, 0, -1, 0, 0], [1, 1, 1], [2, 2, 2]),
        ([0, 0, 1, 0, 1, 0, 1, 0, 0], [1, 1, 1], [2, 2, 2]),
        ([1, 0, 0, 0, 0, -1, 0, -1, 0], [1, 1, 1], [2, 2, 2]),
        ([1, 0, 0, 0, 0, 1, 0, 1, 0], [1, 1, 1], [2, 2, 2])):
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num_list)
    trans_den = N.array(den_list)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
# Space group 220 (I -4 3 d).  Each entry is
# (rotation matrix, translation numerators, translation denominators),
# i.e. the fractional translation vector is num/den elementwise.
# The second half of the table repeats the first 24 operations shifted
# by the I-centring translation (1/2, 1/2, 1/2).
transformations = []
for _elements, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,1,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,-1,0,1,0,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,-1,0,0], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,-1], [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,3,3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,3], [4,4,4]),
    ([0,0,1,0,1,0,1,0,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,0,-1,0,-1,0], [1,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,1,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,-1,0], [3,3,5], [4,4,4]),
    ([-1,0,0,0,0,-1,0,1,0], [3,5,5], [4,4,4]),
    ([0,0,-1,0,-1,0,1,0,0], [3,5,5], [4,4,4]),
    ([0,0,1,0,-1,0,-1,0,0], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,-1], [3,5,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [3,3,5], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [3,5,5], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [3,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,-1,0,0], [3,3,5], [4,4,4]),
    ([0,0,1,0,1,0,1,0,0], [3,3,3], [4,4,4]),
    ([1,0,0,0,0,-1,0,-1,0], [3,5,3], [4,4,4]),
    ([1,0,0,0,0,1,0,1,0], [3,3,3], [4,4,4]),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
# Space group 221 (P m -3 m).  All 48 operations of this group carry a
# zero translation (numerators [0,0,0] over denominators [1,1,1]), so
# only the rotation matrices vary; each stored entry is
# (rotation matrix, translation numerators, translation denominators).
transformations = []
for _elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
# Space group 222 (P n -3 n, origin choice 2 — the ':2' suffix in the
# Hermann-Mauguin symbol).  Each entry is (rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# num/den elementwise and varies per operation (some numerators are
# negative, exactly as in the generated table).
transformations = []
for _elements, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [0,1,0], [1,2,1]),
    ([1,0,0,0,0,1,0,-1,0], [0,0,1], [1,1,2]),
    ([0,0,1,0,1,0,-1,0,0], [0,0,1], [1,1,2]),
    ([0,0,-1,0,1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,0,-1,0,1,0], [0,0,-1], [1,1,2]),
    ([0,0,-1,0,-1,0,1,0,0], [0,0,-1], [1,1,2]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [0,-1,0], [1,2,1]),
    ([0,0,1,0,1,0,1,0,0], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,1,0], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
# Space group 223 (P m -3 n).
# Each symmetry operation is a triple (rotation matrix, translation
# numerator, translation denominator); the translation is num/den in
# fractional (crystallographic) coordinates.  The flat 9-element list is
# reshaped to a 3x3 integer rotation matrix.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,-1,-1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,-1,-1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,-1,-1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,-1,-1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
# Space group 224 (P n -3 m, origin choice 2).
# Each symmetry operation is a triple (rotation matrix, translation
# numerator, translation denominator); the translation is num/den in
# fractional (crystallographic) coordinates.  The flat 9-element list is
# reshaped to a 3x3 integer rotation matrix.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,0,1], [2,1,2]),
    ([1,0,0,0,0,1,0,-1,0], [1,1,0], [2,2,1]),
    ([0,0,1,0,1,0,-1,0,0], [1,1,0], [2,2,1]),
    ([0,0,-1,0,1,0,1,0,0], [0,1,1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,1,0,-1,0,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,1,0], [0,1,1], [1,2,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,0,-1], [2,1,2]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,0,-1,0,-1,0,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,0,1,0,1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,0,1,0,1,0], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
space_groups[224] = sg
space_groups['P n -3 m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
# Continuation of the generated operator table for space group 225
# ("F m -3 m").  The module-level list `transformations` (started above)
# receives the remaining symmetry operations, each stored as a tuple
# (rotation matrix, translation numerators, translation denominators).
# The expanded 5-line quintets of the generated file are collapsed into
# data-driven loops; the append order is exactly that of the original.

# Last four operations of the (0, 1/2, 1/2) face-centering set.
for _elems in ([0,0,-1,0,1,0,-1,0,0],
               [0,0,1,0,1,0,1,0,0],
               [1,0,0,0,0,-1,0,-1,0],
               [1,0,0,0,0,1,0,1,0]):
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,1,1]), N.array([1,2,2])))

# The 48 point-group rotations, in the order used by the generated table.
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]

# The full rotation set combined first with the (1/2, 0, 1/2) and then
# with the (1/2, 1/2, 0) face-centering translation.  A fresh numerator
# and denominator array is created per operation, as in the original.
for _num, _den in (([1,0,1], [2,1,2]), ([1,1,0], [2,2,1])):
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
# Register space group 225 under both its IT number and its
# Hermann-Mauguin symbol (assignment order matches the original:
# sg, then the numeric key, then the symbolic key).
sg = space_groups[225] = space_groups['F m -3 m'] = \
    SpaceGroup(225, 'F m -3 m', transformations)
# Operator table for space group 226 (generated table, collapsed into a
# data-driven form).  Each base operation is stored as
# (rotation elements, translation numerators, translation denominators);
# the face-centering shifts are added with exact Fraction arithmetic so
# that the stored numerator/denominator arrays match the expanded
# originals element for element.
# NOTE(review): this builds the base set, the full (0,1/2,1/2) set and
# the first 20 operations of the (1/2,0,1/2) set; the remainder of the
# 226 table is appended by the code that follows.
from fractions import Fraction

transformations = []
_base_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,-1,0], [1,0,0], [2,1,1]),
    ([0,0,1,0,1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,0,1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,0,1,0,-1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,1,0,1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,0,0], [2,1,1]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,0,0], [2,1,1]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,0,0], [2,1,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,0,0], [2,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,0,0], [2,1,1]),
    ([0,0,1,0,1,0,1,0,0], [-1,0,0], [2,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,1,0], [-1,0,0], [2,1,1]),
]
# Base set (no shift), then the (0,1/2,1/2)-shifted set, then the first
# 20 operations of the (1/2,0,1/2)-shifted set (the table continues
# after this chunk).
for _shift, _count in (((0, 0, 0), 48),
                       ((0, Fraction(1, 2), Fraction(1, 2)), 48),
                       ((Fraction(1, 2), 0, Fraction(1, 2)), 20)):
    for _elems, _num, _den in _base_ops[:_count]:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        _t = [Fraction(n, d) + s for n, d, s in zip(_num, _den, _shift)]
        transformations.append((rot,
                                N.array([f.numerator for f in _t]),
                                N.array([f.denominator for f in _t])))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(226, 'F m -3 c', transformations)
space_groups[226] = sg
space_groups['F m -3 c'] = sg
# ------------------------------------------------------------------
# Machine-generated symmetry operations for the next space group in
# the table (its SpaceGroup registration lies past this chunk).  Each
# operation is the triple (rot, trans_num, trans_den): a 3x3 integer
# rotation matrix and a fractional translation trans_num / trans_den
# (elementwise).
# NOTE(review): presumably derived from the International Tables for
# Crystallography -- confirm against the generator before editing.
# ------------------------------------------------------------------
transformations = []

# Operations expressed as data instead of unrolled code: (rotation
# elements row-major, translation numerators, translation
# denominators).  The loop appends exactly the triples the original
# unrolled statements appended, in order.
for _r, _tn, _td in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,0,-1,0,1,0), (1,0,1), (4,1,4)),
    ((1,0,0,0,0,1,0,-1,0), (1,1,0), (4,4,1)),
    ((0,0,1,0,1,0,-1,0,0), (1,1,0), (4,4,1)),
    ((0,0,-1,0,1,0,1,0,0), (0,1,1), (1,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (0,1,1), (1,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,0,1), (4,1,4)),
    ((0,0,1,1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0), (1,1,0), (4,4,1)),
    ((0,0,1,-1,0,0,0,-1,0), (0,1,1), (1,4,4)),
    ((0,-1,0,0,0,1,-1,0,0), (1,0,1), (4,1,4)),
    ((0,0,-1,-1,0,0,0,1,0), (1,1,0), (4,4,1)),
    ((0,0,-1,1,0,0,0,-1,0), (1,0,1), (4,1,4)),
    ((0,1,0,0,0,-1,-1,0,0), (0,1,1), (1,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,4,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,1), (4,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (4,4,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (4,4,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,0,1,0,-1,0,1,0,0), (1,0,1), (4,1,4)),
    ((0,0,-1,0,-1,0,-1,0,0), (0,0,0), (1,1,1)),
    ((-1,0,0,0,0,1,0,1,0), (0,1,1), (1,4,4)),
    ((-1,0,0,0,0,-1,0,-1,0), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,0,1,0,-1,0), (-1,0,-1), (4,1,4)),
    ((-1,0,0,0,0,-1,0,1,0), (-1,-1,0), (4,4,1)),
    ((0,0,-1,0,-1,0,1,0,0), (-1,-1,0), (4,4,1)),
    ((0,0,1,0,-1,0,-1,0,0), (0,-1,-1), (1,4,4)),
    ((0,1,0,-1,0,0,0,0,-1), (0,-1,-1), (1,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,0,-1), (4,1,4)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,-1,0,0), (-1,-1,0), (4,4,1)),
    ((0,0,-1,1,0,0,0,1,0), (0,-1,-1), (1,4,4)),
    ((0,1,0,0,0,-1,1,0,0), (-1,0,-1), (4,1,4)),
    ((0,0,1,1,0,0,0,-1,0), (-1,-1,0), (4,4,1)),
    ((0,0,1,-1,0,0,0,1,0), (-1,0,-1), (4,1,4)),
    ((0,-1,0,0,0,1,1,0,0), (0,-1,-1), (1,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,-1), (1,4,4)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,-1), (4,1,4)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (4,4,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,0), (4,4,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,0,-1,0,1,0,-1,0,0), (-1,0,-1), (4,1,4)),
    ((0,0,1,0,1,0,1,0,0), (0,0,0), (1,1,1)),
    ((1,0,0,0,0,-1,0,-1,0), (0,-1,-1), (1,4,4)),
    ((1,0,0,0,0,1,0,1,0), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,0,-1,0,1,0), (1,1,3), (4,2,4)),
    ((1,0,0,0,0,1,0,-1,0), (1,3,1), (4,4,2)),
    ((0,0,1,0,1,0,-1,0,0), (1,3,1), (4,4,2)),
    ((0,0,-1,0,1,0,1,0,0), (0,3,3), (1,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (0,3,3), (1,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,3), (4,2,4)),
    ((0,0,1,1,0,0,0,1,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,1,1,0,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,-1,1,0,0), (1,3,1), (4,4,2)),
    ((0,0,1,-1,0,0,0,-1,0), (0,3,3), (1,4,4)),
    ((0,-1,0,0,0,1,-1,0,0), (1,1,3), (4,2,4)),
    ((0,0,-1,-1,0,0,0,1,0), (1,3,1), (4,4,2)),
    ((0,0,-1,1,0,0,0,-1,0), (1,1,3), (4,2,4)),
    ((0,1,0,0,0,-1,-1,0,0), (0,3,3), (1,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,3,3), (1,4,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,3), (4,2,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,3,1), (4,4,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,3,1), (4,4,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,1,1), (1,2,2)),
    ((0,0,1,0,-1,0,1,0,0), (1,1,3), (4,2,4)),
    ((0,0,-1,0,-1,0,-1,0,0), (0,1,1), (1,2,2)),
    ((-1,0,0,0,0,1,0,1,0), (0,3,3), (1,4,4)),
    ((-1,0,0,0,0,-1,0,-1,0), (0,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,0,1,0,-1,0), (-1,1,1), (4,2,4)),
    ((-1,0,0,0,0,-1,0,1,0), (-1,1,1), (4,4,2)),
    ((0,0,-1,0,-1,0,1,0,0), (-1,1,1), (4,4,2)),
    ((0,0,1,0,-1,0,-1,0,0), (0,1,1), (1,4,4)),
    ((0,1,0,-1,0,0,0,0,-1), (0,1,1), (1,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,1,1), (4,2,4)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,1,-1,0,0), (-1,1,1), (4,4,2)),
    ((0,0,-1,1,0,0,0,1,0), (0,1,1), (1,4,4)),
    ((0,1,0,0,0,-1,1,0,0), (-1,1,1), (4,2,4)),
    ((0,0,1,1,0,0,0,-1,0), (-1,1,1), (4,4,2)),
    ((0,0,1,-1,0,0,0,1,0), (-1,1,1), (4,2,4)),
    ((0,-1,0,0,0,1,1,0,0), (0,1,1), (1,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (0,1,1), (1,4,4)),
    ((1,0,0,0,-1,0,0,0,1), (-1,1,1), (4,2,4)),
    ((1,0,0,0,1,0,0,0,-1), (-1,1,1), (4,4,2)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,1,1), (4,4,2)),
    ((0,1,0,1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((0,0,-1,0,1,0,-1,0,0), (-1,1,1), (4,2,4)),
    ((0,0,1,0,1,0,1,0,0), (0,1,1), (1,2,2)),
    ((1,0,0,0,0,-1,0,-1,0), (0,1,1), (1,4,4)),
    ((1,0,0,0,0,1,0,1,0), (0,1,1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,1), (1,0,1), (2,1,2)),
    ((1,0,0,0,0,-1,0,1,0), (3,0,3), (4,1,4)),
    ((1,0,0,0,0,1,0,-1,0), (3,1,1), (4,4,2)),
    ((0,0,1,0,1,0,-1,0,0), (3,1,1), (4,4,2)),
    ((0,0,-1,0,1,0,1,0,0), (1,1,3), (2,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,3), (2,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (3,0,3), (4,1,4)),
    ((0,0,1,1,0,0,0,1,0), (1,0,1), (2,1,2)),
    ((0,1,0,0,0,1,1,0,0), (1,0,1), (2,1,2)),
    ((0,-1,0,0,0,-1,1,0,0), (3,1,1), (4,4,2)),
    ((0,0,1,-1,0,0,0,-1,0), (1,1,3), (2,4,4)),
    ((0,-1,0,0,0,1,-1,0,0), (3,0,3), (4,1,4)),
    ((0,0,-1,-1,0,0,0,1,0), (3,1,1), (4,4,2)),
    ((0,0,-1,1,0,0,0,-1,0), (3,0,3), (4,1,4)),
    ((0,1,0,0,0,-1,-1,0,0), (1,1,3), (2,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,3), (2,4,4)),
    ((-1,0,0,0,1,0,0,0,-1), (3,0,3), (4,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (3,1,1), (4,4,2)),
    ((0,1,0,1,0,0,0,0,-1), (3,1,1), (4,4,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,0,1), (2,1,2)),
    ((0,0,1,0,-1,0,1,0,0), (3,0,3), (4,1,4)),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))

# First rotation of an operation whose remaining lines lie past this
# chunk: leave it assigned so the (unseen) continuation completes it.
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(227, 'F d -3 m :2', transformations)
space_groups[227] = sg
space_groups['F d -3 m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = | N.array([0,1,0,1,0,0,0,0,1]) | numpy.array |
"""
Generates plots according to an execution stats trace, and saves them into a /Plot/ folder.
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
sys.path.insert(1, "../")
from Utils.Stats import unpack_stats
# Path to the execution folder
path = "../temp/NNSGA_4f0/"
# Indicates if the plots should be generated for every sub directory of the 'path' folder
sub_directories = False
def plot_raw(key, dic, destination):
    """Plot raw stats as they were saved, ex. an objective for every single agent.

    Bug fix: the buffer must hold indices up to ``tup[0][-1]`` *inclusive*, so
    its length is ``tup[0][-1] + 1`` (the same sizing the ``plot`` helper uses);
    the previous size raised an IndexError when writing the last sample.
    """
    for tup in dic[key]:
        # Spread the sampled values over the full index range, leaving NaN
        # where nothing was recorded so matplotlib draws gaps in the line.
        extended_ord = np.empty(tup[0][-1] + 1)
        extended_ord[:] = np.nan
        for i in range(len(tup[0])):
            extended_ord[tup[0][i]] = tup[1][i]
        plt.plot(extended_ord)
    plt.savefig(f"{destination}/Raw_{key}")
def plot(value, key, destination):
extensions = list()
max_range = 0
for tup in value:
if tup[0][-1] > max_range:
max_range = tup[0][-1] + 1
extended_ord = np.empty((tup[0][-1]+1))
extended_ord[:] = np.nan
for i in range(len(tup[0])):
extended_ord[tup[0][i]] = tup[1][i]
extensions.append(extended_ord)
plt.plot(extended_ord) # Raw stats
plt.title(f"{key} raw data")
plt.savefig(f"{destination}/Raw_{key}")
plt.clf()
# Min and Max
max_ext = np.empty(max_range)
max_ext[:] = np.nan
min_ext = np.empty(max_range)
min_ext[:] = np.nan
cat = [list() for i in range(max_range)]
for ext in extensions:
for i in range(len(ext)):
if ext[i] != np.nan:
cat[i].append(ext[i])
med_ext = np.empty(max_range)
med_ext[:] = np.nan
quart1 = np.empty(max_range)
quart1[:] = np.nan
quart3 = np.empty(max_range)
quart3[:] = np.nan
for i in range(len(cat)):
if len(cat[i]) != 0:
val = np.array(cat[i])
med_ext[i] = np.quantile(val, 0.5)
quart1[i] = | np.quantile(val, 0.25) | numpy.quantile |
import os
from skimage import morphology as morph
from torch.utils import data
import pandas as pd
import pycocotools.mask as mask_util
from skimage import measure
import utils as ut
import numpy as np
from skimage.transform import resize
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
from torch.nn.functional import interpolate
import pycocotools.mask as mask_utils
import torch
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.bounding_box import BoxList
from pycocotools import mask as maskUtils
# ------------------------------
# proposals
def annList2BestDice(annList, batch, maskVoid_flag=True):
    """Replace each annotation's segmentation with its best-overlapping proposal.

    For every annotation in ``annList``, find the sharp proposal (score > 0.75)
    in ``batch["proposals"][0]`` with the highest mask IoU and substitute its
    segmentation (optionally masked by ``batch["maskVoid"]``).

    Cleanup: removed a large block of commented-out sample RLE data that was
    left in from debugging; logic is unchanged.

    Returns a dict ``{"annList": ...}``; the input list is returned untouched
    when it is empty or no proposal overlaps were found.
    """
    if annList == []:
        return {"annList": annList}

    assert len(batch["proposals"]) == 1
    proposals = batch["proposals"][0]

    annList_segs = [ann["segmentation"] for ann in annList]
    props_segs = [prop["segmentation"]
                  for prop in proposals if prop["score"] > 0.75]

    # Third argument (iscrowd flags) is all zeros: plain-instance IoU.
    ious = maskUtils.iou(annList_segs,
                         props_segs,
                         np.zeros(len(annList_segs)))
    if len(ious) == 0:
        return {"annList": annList}

    indices = ious.argmax(axis=1)
    annList_new = []
    for i, ind in enumerate(indices):
        ann_org = annList[i]
        ann_prop_seg = props_segs[ind]

        # Zero-out void pixels before re-encoding, when a void mask exists
        # and the caller asked for it.
        binmask = ann2mask({"segmentation": ann_prop_seg})["mask"]
        maskVoid = batch["maskVoid"]
        if (maskVoid is not None) and maskVoid_flag:
            binmask = binmask * (ut.t2n(maskVoid).squeeze())

        ann_prop_seg = maskUtils.encode(
            np.asfortranarray(ut.t2n(binmask)).astype("uint8"))
        ann_prop_seg["counts"] = ann_prop_seg["counts"].decode("utf-8")

        ann_org["segmentation"] = ann_prop_seg
        annList_new += [ann_org]

    return {"annList": annList_new}
@torch.no_grad()
def annList2best_objectness(annList, points,
                            single_point=True, maskVoid=None):
    """Unfinished stub — raises :class:`NotImplementedError`.

    The original body looped over ``annList`` doing nothing and then returned
    ``blobs`` and ``categoryDict``, neither of which was ever defined, so any
    call raised an accidental NameError. Raise an explicit error instead so
    the failure mode is clear to callers.
    """
    raise NotImplementedError(
        "annList2best_objectness is an unfinished stub: 'blobs' and "
        "'categoryDict' were never computed")
@torch.no_grad()
def pointList2BestObjectness(pointList, image_id,
                             single_point=True, maskVoid=None):
    """Build one COCO-style annotation per point from its best proposal.

    Groups sharp proposals per point via ``pointList2propDict`` and, for each
    point with at least one proposal, paints the top proposal's mask into a
    blob map and emits an RLE-encoded annotation.

    Fix: removed a leftover ``ipdb.set_trace()`` debugging breakpoint.

    NOTE(review): the body references ``batch``, which is not a parameter of
    this function — calling it as-is raises NameError. Presumably the
    signature should receive ``batch`` (as ``pointList2propDict`` does);
    confirm against the call sites before relying on this function.
    """
    propDict = pointList2propDict(
        pointList, batch, thresh=0.5,
        single_point=single_point)

    h, w = propDict["background"].squeeze().shape
    blobs = np.zeros((h, w), int)
    categoryDict = {}

    annList = []
    for i, prop in enumerate(propDict['propDict']):
        if len(prop["annList"]) == 0:
            continue
        # Paint the best proposal's mask with blob id i + 1.
        blobs[prop["annList"][0]["mask"] != 0] = i + 1
        categoryDict[i + 1] = prop["category_id"]

        # Zero-out void pixels before encoding when a void mask is given.
        if maskVoid is not None:
            binmask = prop["annList"][0]["mask"] * (ut.t2n(maskVoid).squeeze())
        else:
            binmask = prop["annList"][0]["mask"]

        seg = maskUtils.encode(
            np.asfortranarray(ut.t2n(binmask)).astype("uint8"))
        seg["counts"] = seg["counts"].decode("utf-8")
        score = prop["annList"][0]["score"]

        annList += [{
            "segmentation": seg,
            "iscrowd": 0,
            "area": int(maskUtils.area(seg)),
            "image_id": image_id,
            "category_id": int(prop['category_id']),
            "height": h,
            "width": w,
            "score": score
        }]

    return {"annList": annList, "blobs": blobs, "categoryDict": categoryDict}
class SharpProposals:
    """Reader for pre-computed sharp-mask proposal files of a single batch.

    Loads the JSON proposal file matching ``batch["name"]`` (or
    ``batch["SharpProposals_name"]`` when present), optionally caching a
    version resized to the batch's image shape, and exposes the proposals
    sorted by descending score.
    """

    def __init__(self, batch):
        # Fix: the parameter was previously named ``fname`` while the body
        # only ever referenced ``batch`` (a guaranteed NameError). Renamed to
        # match the actual usage and the call sites, e.g.
        # SharpProposals(batch) in pointList2propDict. Positional callers are
        # unaffected.
        self.proposals_path = batch["proposals_path"][0]

        if "SharpProposals_name" in batch:
            batch_name = batch["SharpProposals_name"][0]
        else:
            batch_name = batch["name"][0]

        name_jpg = self.proposals_path + "{}.jpg.json".format(batch_name)
        name_png = self.proposals_path + "{}.json".format(batch_name)

        if os.path.exists(name_jpg):
            name = name_jpg
        else:
            name = name_png

        _, _, self.h, self.w = batch["images"].shape

        if "resized" in batch and batch["resized"].item() == 1:
            # Cache a copy of the proposal file resized to (h, w), built once.
            name_resized = self.proposals_path + "{}_{}_{}.json".format(batch["name"][0],
                                                                        self.h, self.w)

            if not os.path.exists(name_resized):
                proposals = ut.load_json(name)
                json_file = loop_and_resize(self.h, self.w, proposals)
                ut.save_json(name_resized, json_file)
        else:
            name_resized = name

        proposals = ut.load_json(name_resized)
        self.proposals = sorted(proposals, key=lambda x: x["score"],
                                reverse=True)

    def __getitem__(self, i):
        """Decode proposal ``i`` into ``{"mask": binary ndarray, "score": float}``."""
        encoded = self.proposals[i]["segmentation"]
        proposal_mask = maskUtils.decode(encoded)
        return {"mask": proposal_mask,
                "score": self.proposals[i]["score"]}

    def __len__(self):
        return len(self.proposals)

    def sharpmask2psfcn_proposals(self):
        # Unfinished stub: previously contained only an ipdb breakpoint
        # (debug leftover); fail loudly instead of dropping into a debugger.
        raise NotImplementedError
def loop_and_resize(h, w, proposals):
    """Return ``proposals`` with every segmentation re-encoded at shape (h, w).

    Proposals whose decoded mask already has shape ``(h, w)`` are kept as-is;
    all others get their mask nearest-neighbour resized, re-binarised and
    RLE re-encoded in place. Progress is printed per proposal.
    """
    resized_list = []
    total = len(proposals)
    for idx, prop in enumerate(proposals):
        print("{}/{}".format(idx, total))
        mask = maskUtils.decode(prop["segmentation"])
        if mask.shape != (h, w):
            # order=0 keeps the mask binary; > 0 re-binarises after resize.
            mask = (resize(mask * 255, (h, w), order=0) > 0).astype(int)
            seg = maskUtils.encode(np.asfortranarray(mask).astype("uint8"))
            seg["counts"] = seg["counts"].decode("utf-8")
            prop["segmentation"] = seg
        resized_list.append(prop)
    return resized_list
@torch.no_grad()
def pointList2propDict(pointList, batch, single_point=False, thresh=0.5):
    """Group sharp proposals by the annotation point they cover.

    A proposal with score > ``thresh`` is assigned to every point that falls
    inside its mask. With ``single_point=True``, proposals covering more than
    one point are rejected. Returns a dict with the per-point proposal groups
    (``propDict``), the accumulated ``foreground`` coverage map and its
    complement ``background``.
    """
    sharp_proposals = SharpProposals(batch)
    shape = pointList[0]["shape"]
    foreground = np.zeros(shape, int)

    if single_point:
        points = pointList2mask(pointList)["mask"]

    # One bucket of proposals (and their proposal indices) per point.
    annDict = {i: [] for i in range(len(pointList))}
    idDict = {i: [] for i in range(len(pointList))}

    for k in range(len(sharp_proposals)):
        proposal_ann = sharp_proposals[k]

        if not (proposal_ann["score"] > thresh):
            continue

        proposal_mask = proposal_ann["mask"]

        for i, p in enumerate(pointList):
            if proposal_mask[p["y"], p["x"]] == 0:
                continue
            # In single-point mode, reject proposals covering several points.
            if single_point and (points * proposal_mask).sum() > 1:
                continue
            annDict[i].append(proposal_ann)
            idDict[i].append(k)

    propDict = []
    for i, p in enumerate(pointList):
        annList = annDict[i]
        mask = annList2mask(annList)["mask"]
        if mask is not None:
            foreground = foreground + mask
        propDict.append({"annList": annList,
                         "point": p,
                         "idList": idDict[i],
                         "category_id": int(p["category_id"])})

    return {"propDict": propDict,
            "foreground": foreground,
            "background": (foreground == 0).astype(int)}
def mask2annList(maskClass, maskObjects, image_id, classes=None,
                 maskVoid=-1):
    """Convert per-pixel class/instance label maps into a COCO-style annList.

    Each instance id in ``maskObjects`` (ignoring background 0 and void 255)
    becomes one annotation; its category is read from ``maskClass`` under the
    instance mask. When ``classes`` is given, instances of other categories
    are skipped.
    """
    class_map = np.array(maskClass)
    object_map = np.array(maskObjects)

    # Instance ids, ignoring background (0) and the void label (255).
    instance_ids = np.setdiff1d(np.unique(object_map), [0, 255])

    annList = []
    for obj_id in instance_ids:
        binmask = object_map == obj_id
        # First non-zero label under the instance mask is its class.
        category_id = np.unique(class_map * binmask)[1]
        if classes is not None and category_id not in classes:
            continue

        ann = mask2ann(binmask,
                       category_id=category_id,
                       image_id=image_id,
                       maskVoid=maskVoid,
                       score=1,
                       point=-1)
        # "bbox" - [nx4] Bounding box(es) stored as [x y w h]
        ann["bbox"] = maskUtils.toBbox(ann["segmentation"])
        annList.append(ann)

    return annList
def annList2targets(annList):
    """Convert a COCO-style annList into a maskrcnn-benchmark ``BoxList``.

    Returns ``[]`` for an empty annList. Crowd annotations are dropped,
    boxes are converted from xywh to xyxy, labels are attached, and — when
    the first annotation carries a segmentation — a ``SegmentationMask``
    field is added. The result is clipped to the image.

    Cleanup: removed a redundant function-local re-import of
    ``SegmentationMask`` (already imported at module level) and a
    commented-out debug visualisation line.
    """
    if len(annList) == 0:
        return []

    # Image size comes from the first annotation (all share height/width).
    ann = annList[0]
    H = ann["height"]
    W = ann["width"]
    img_size = (W, H)

    # Keep only non-crowd annotations.
    annList = [obj for obj in annList if (("iscrowd" not in obj) or
                                          (obj["iscrowd"] == 0))]

    boxes = [obj["bbox"] for obj in annList]
    boxes = torch.as_tensor(boxes).reshape(len(annList), 4)  # guard against no boxes
    target = BoxList(boxes, img_size, mode="xywh").convert("xyxy")

    classes = torch.tensor([obj["category_id"] for obj in annList])
    target.add_field("labels", classes)

    if "segmentation" in annList[0]:
        masks = [obj["segmentation"] for obj in annList]
        masks = SegmentationMask(masks, img_size, mode="mask")
        target.add_field("masks", masks)

    return target.clip_to_image(remove_empty=True)
def targets2annList_bbox(preds, image_id=-1, maskVoid=None):
    """Turn a BoxList of scored predictions into a box-only annList."""
    width, height = preds.size
    return bbox2annList(preds.bbox,
                        scoreList=preds.get_field("scores"),
                        categoryList=preds.get_field("labels"),
                        H=height,
                        W=width,
                        image_id=image_id,
                        mode="xyxy",
                        mask=None)
def targets2annList(targets, shape, image_id=-1, maskVoid=None, score_threshold=0.5):
    """Convert a BoxList of predictions (with masks) into an RLE annList.

    Masks are projected onto the full (H, W) image when needed, optionally
    intersected with ``maskVoid``, RLE-encoded, and paired with the
    resized boxes via ``segm2annList``.
    """
    # preds.to("cpu")
    H, W = shape
    preds = targets.resize((W, H))
    labels = targets.get_field("labels")
    if "masks" in targets.extra_fields:
        masks = targets.get_field("masks")
        masks = masks.get_mask_tensor()[:, None]
    else:
        masks = targets.get_field("mask")
    if "scores" in targets.extra_fields:
        scores = targets.get_field("scores")
    else:
        # Ground-truth targets carry no scores; default every score to 1.
        scores = labels * 0 + 1
    masker = Masker(threshold=0.5, padding=1)
    # Only paste masks to image size when they are not already full-size.
    if list(masks.shape[-2:]) != [H, W]:
        masks = masker(masks.expand(1, -1, -1, -1, -1), targets)
        masks = masks[0]
    # apply mask void
    if maskVoid is not None and masks.shape[0] > 0:
        masks = masks * maskVoid.byte()
    # Fortran-ordered (H, W, 1) arrays as required by pycocotools encode.
    rles = [
        mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
        for mask in masks
    ]
    for rle in rles:
        rle["counts"] = rle["counts"].decode("utf-8")
    annList = segm2annList(
        segm=rles,
        boxes=preds.bbox.cpu(),
        scoreList=scores,
        categoryList=labels,
        H=H,
        W=W,
        image_id=image_id,
        mode="xyxy",
        mask=None,
        score_threshold=score_threshold)
    return annList
def proposals2bbox(bbox, scoreList, W, H):
    """annList for raw proposal boxes (category 1, image_id "proposal")."""
    categories = np.ones(bbox.shape[0])
    return bbox2annList(bbox, scoreList, categories, H, W,
                        image_id="proposal", mode="xyxy", mask=None)
def proposals2annList(proposals):
    """BoxList of RPN proposals -> annList scored by objectness."""
    boxes = proposals.bbox
    width, height = proposals.size
    return bbox2annList(boxes,
                        proposals.get_field("objectness"),
                        np.ones(boxes.shape[0]),
                        height,
                        width,
                        image_id="proposal",
                        mode="xyxy",
                        mask=None)
def target2annList(target, image_id):
    """Ground-truth BoxList -> annList (scores fixed at 0)."""
    boxes = target.bbox
    width, height = target.size
    zero_scores = np.zeros(boxes.shape[0])
    return bbox2annList(boxes,
                        zero_scores,
                        target.get_field("labels"),
                        height,
                        width,
                        image_id,
                        mode="xyxy",
                        mask=None)
def load_ann_json(fname, image_shape):
    """Load an annList JSON, resizing every mask to ``image_shape``.

    The resized annList is cached next to the original file with the
    shape embedded in the filename and reused on subsequent calls.
    """
    name = ut.extract_fname(fname)
    fname_new = fname.replace(name, name + "_%s.json" % str(image_shape))
    if os.path.exists(fname_new):
        return ut.load_json(fname_new)
    else:
        annList = ut.load_json(fname)
        annList_new = []
        for ann in annList:
            binmask = ann2mask(ann)["mask"]
            if binmask.shape != image_shape:
                # order=0 (nearest neighbour) keeps the mask binary.
                binmask = resize(
                    ann2mask(ann)["mask"],
                    output_shape=image_shape,
                    order=0,
                    anti_aliasing=False,
                    mode="constant",
                    preserve_range=True).astype(int)
            seg = maskUtils.encode(
                np.asfortranarray(ut.t2n(binmask)).astype("uint8"))
            seg["counts"] = seg["counts"].decode("utf-8")
            ann["score"] = ann["score"]
            ann["segmentation"] = seg
            ann["bbox"] = maskUtils.toBbox(seg).astype(int).tolist()
            annList_new += [ann]
        ut.save_json(fname_new, annList_new)
        return annList_new
def intersect_bbox(b1, b2):
    """Return True if two [x, y, w, h] boxes overlap (touching counts)."""
    x1, y1, w1, h1 = np.array(b1)
    x2, y2, w2, h2 = np.array(b2)
    # Boxes are disjoint iff one lies entirely left/right of the other,
    # or entirely above/below; otherwise they intersect.
    disjoint_x = (x1 + w1) < x2 or x1 > (x2 + w2)
    disjoint_y = (y1 + h1) < y2 or y1 > (y2 + h2)
    return not (disjoint_x or disjoint_y)
def annList2propList(annList, sharpmask_annList):
    """Replace each annotation's mask with the best-matching proposal.

    For every annotation with score >= 0.5, the sharpmask proposal whose
    mask has the highest Dice overlap (restricted to proposals whose
    bounding box intersects the annotation's) replaces its segmentation,
    and that Dice score becomes the annotation's score.  Low-scoring
    annotations keep their own mask with score 0.
    """
    new_annList = []
    for ann in annList:
        binmask = ann2mask(ann)["mask"]
        best_score = 0.
        best_mask = binmask
        # Hoisted out of the inner loop: the original re-tested this
        # loop-invariant condition once per proposal.
        # NOTE(review): the test reads ann["score"]; it may have been
        # intended as sharp_ann["score"] -- confirm before changing.
        if ann["score"] >= 0.5:
            for sharp_ann in sharpmask_annList:
                if not intersect_bbox(ann["bbox"], sharp_ann["bbox"]):
                    continue
                sharp_mask = ann2mask(sharp_ann)["mask"]
                score = dice(sharp_mask, binmask)
                if score > best_score:
                    best_mask = sharp_mask
                    best_score = score
        seg = maskUtils.encode(
            np.asfortranarray(ut.t2n(best_mask)).astype("uint8"))
        seg["counts"] = seg["counts"].decode("utf-8")
        ann["score"] = best_score
        ann["segmentation"] = seg
        new_annList += [ann]
    return new_annList
def maskList2annList(maskList, categoryList, image_id, scoreList=None):
    """Convert a stack of binary masks (n, h, w) into a COCO-style annList.

    Bug fix: when ``scoreList`` was None the original referenced an
    undefined local ``score`` and raised NameError on the first mask;
    the score now defaults to 1 in that case.
    """
    annList = []
    _, h, w = maskList.shape
    for i in range(maskList.shape[0]):
        binmask = maskList[i]
        seg = maskUtils.encode(
            np.asfortranarray(ut.t2n(binmask)).astype("uint8"))
        seg["counts"] = seg["counts"].decode("utf-8")
        score = 1 if scoreList is None else scoreList[i]
        annList += [{
            "segmentation": seg,
            "iscrowd": 0,
            "bbox": maskUtils.toBbox(seg).astype(int).tolist(),
            "area": int(maskUtils.area(seg)),
            "image_id": image_id,
            "category_id": int(categoryList[i]),
            "height": h,
            "width": w,
            "score": score
        }]
    return annList
# Transpose method codes shared by Mask.transpose and Polygons.transpose
# (mirroring PIL.Image's FLIP_* constants).
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
    """Single-instance segmentation mask stored as a 2-D float32 tensor.

    Accepts polygons, COCO RLE (compressed or uncompressed), numpy
    arrays, torch tensors, or another ``Mask`` instance.
    (Original author's note: this class is unfinished and not meant
    for use yet.)
    """
    def __init__(self, segm, size, mode):
        width, height = size
        if isinstance(segm, Mask):
            mask = segm.mask
        else:
            if type(segm) == list:
                # polygons
                mask = Polygons(
                    segm, size,
                    'polygon').convert('mask').to(dtype=torch.float32)
            elif type(segm) == dict and 'counts' in segm:
                if type(segm['counts']) == list:
                    # uncompressed RLE
                    h, w = segm['size']
                    rle = mask_utils.frPyObjects(segm, h, w)
                    mask = mask_utils.decode(rle)
                    mask = torch.from_numpy(mask).to(dtype=torch.float32)
                else:
                    # compressed RLE
                    mask = mask_utils.decode(segm)
                    mask = torch.from_numpy(mask).to(dtype=torch.float32)
            else:
                # binary mask
                if type(segm) == np.ndarray:
                    mask = torch.from_numpy(segm).to(dtype=torch.float32)
                else:  # torch.Tensor
                    mask = segm.to(dtype=torch.float32)
        self.mask = mask
        self.size = size
        self.mode = mode
    def transpose(self, method):
        """Return a new Mask flipped horizontally or vertically."""
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented")
        width, height = self.size
        if method == FLIP_LEFT_RIGHT:
            max_idx = width
            dim = 1
        elif method == FLIP_TOP_BOTTOM:
            max_idx = height
            dim = 0
        # Reverse the chosen axis via index_select with reversed indices.
        flip_idx = torch.tensor(list(range(max_idx)[::-1]))
        flipped_mask = self.mask.index_select(dim, flip_idx)
        return Mask(flipped_mask, self.size, self.mode)
    def crop(self, box):
        """Return a new Mask cropped to the inclusive [x1, y1, x2, y2] box."""
        box = [int(b) for b in box]
        # w, h = box[2] - box[0], box[3] - box[1]
        w, h = box[2] - box[0] + 1, box[3] - box[1] + 1
        # if w == 0:
        #     box[2] = box[0] + 1
        # if h == 0:
        #     box[3] = box[1] + 1
        # Guard against degenerate (zero-size) boxes.
        w = max(w, 1)
        h = max(h, 1)
        # cropped_mask = self.mask[box[1]: box[3], box[0]: box[2]]
        cropped_mask = self.mask[box[1]:box[3] + 1, box[0]:box[2] + 1]
        return Mask(cropped_mask, size=(w, h), mode=self.mode)
    def resize(self, size, *args, **kwargs):
        """Return a new Mask resized to (width, height) with nearest-neighbour."""
        width, height = size
        scaled_mask = interpolate(
            self.mask[None, None, :, :], (height, width), mode='nearest')[0, 0]
        return Mask(scaled_mask, size=size, mode=self.mode)
    def convert(self, mode):
        # NOTE(review): ``mode`` is ignored; always returns the uint8 mask.
        mask = self.mask.to(dtype=torch.uint8)
        return mask
    def __iter__(self):
        return iter(self.mask)
    def __repr__(self):
        s = self.__class__.__name__ + "("
        # s += "num_mask={}, ".format(len(self.mask))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={}, ".format(self.size[1])
        s += "mode={})".format(self.mode)
        return s
class Polygons(object):
    """
    This class holds a set of polygons that represents a single instance
    of an object mask. The object can be represented as a set of
    polygons.  Each polygon is a flat tensor [x0, y0, x1, y1, ...].
    """
    def __init__(self, polygons, size, mode):
        # assert isinstance(polygons, list), '{}'.format(polygons)
        if isinstance(polygons, list):
            polygons = [
                torch.as_tensor(p, dtype=torch.float32) for p in polygons
            ]
        elif isinstance(polygons, Polygons):
            polygons = polygons.polygons
        self.polygons = polygons
        self.size = size
        self.mode = mode
    def transpose(self, method):
        """Return new Polygons flipped horizontally or vertically."""
        if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented")
        flipped_polygons = []
        width, height = self.size
        if method == FLIP_LEFT_RIGHT:
            dim = width
            idx = 0
        elif method == FLIP_TOP_BOTTOM:
            dim = height
            idx = 1
        for poly in self.polygons:
            p = poly.clone()
            # -1 keeps coordinates on valid pixel centers after the flip.
            TO_REMOVE = 1
            p[idx::2] = dim - poly[idx::2] - TO_REMOVE
            flipped_polygons.append(p)
        return Polygons(flipped_polygons, size=self.size, mode=self.mode)
    def crop(self, box):
        """Return new Polygons translated into the [x1, y1, x2, y2] box frame."""
        w, h = box[2] - box[0], box[3] - box[1]
        # TODO chck if necessary
        w = max(w, 1)
        h = max(h, 1)
        cropped_polygons = []
        for poly in self.polygons:
            p = poly.clone()
            p[0::2] = p[0::2] - box[0]  # .clamp(min=0, max=w)
            p[1::2] = p[1::2] - box[1]  # .clamp(min=0, max=h)
            cropped_polygons.append(p)
        return Polygons(cropped_polygons, size=(w, h), mode=self.mode)
    def resize(self, size, *args, **kwargs):
        """Return new Polygons scaled to (width, height)."""
        ratios = tuple(
            float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
        if ratios[0] == ratios[1]:
            # Isotropic scaling: one multiply per polygon.
            ratio = ratios[0]
            scaled_polys = [p * ratio for p in self.polygons]
            return Polygons(scaled_polys, size, mode=self.mode)
        ratio_w, ratio_h = ratios
        scaled_polygons = []
        for poly in self.polygons:
            p = poly.clone()
            p[0::2] *= ratio_w
            p[1::2] *= ratio_h
            scaled_polygons.append(p)
        return Polygons(scaled_polygons, size=size, mode=self.mode)
    def convert(self, mode):
        """Rasterize the polygons to a binary torch mask.

        NOTE(review): implicitly returns None for any mode other than
        "mask" -- confirm callers only ever pass "mask".
        """
        width, height = self.size
        if mode == "mask":
            rles = mask_utils.frPyObjects([p.numpy() for p in self.polygons],
                                          height, width)
            rle = mask_utils.merge(rles)
            mask = mask_utils.decode(rle)
            mask = torch.from_numpy(mask)
            # TODO add squeeze?
            return mask
    def __repr__(self):
        s = self.__class__.__name__ + "("
        s += "num_polygons={}, ".format(len(self.polygons))
        s += "image_width={}, ".format(self.size[0])
        s += "image_height={}, ".format(self.size[1])
        s += "mode={})".format(self.mode)
        return s
#
# class SegmentationMask(object):
# """
# This class stores the segmentations for all objects in the image
# """
#
# def __init__(self, segms, size, mode=None):
# """
# Arguments:
# segms: three types
# (1) polygons: a list of list of lists of numbers. The first
# level of the list correspond to individual instances,
# the second level to all the polygons that compose the
# object, and the third level to the polygon coordinates.
# (2) rles: COCO's run length encoding format, uncompressed or compressed
# (3) binary masks
# size: (width, height)
# mode: 'polygon', 'mask'. if mode is 'mask', convert mask of any format to binary mask
# """
# assert isinstance(segms, list)
# if len(segms) == 0:
# self.masks = []
# mode = 'mask'
# else:
# if type(segms[0]) != list:
# mode = 'mask'
#
# if mode == 'mask':
# self.masks = [Mask(m, size, mode) for m in segms]
# else: # polygons
# self.masks = [Polygons(p, size, mode) for p in segms]
# self.size = size
# self.mode = mode
#
# def transpose(self, method):
# if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
# raise NotImplementedError(
# "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented")
#
# flipped = []
# for mask in self.masks:
# flipped.append(mask.transpose(method))
# return SegmentationMask(flipped, size=self.size, mode=self.mode)
#
# def crop(self, box):
# w, h = box[2] - box[0], box[3] - box[1]
# cropped = []
# for mask in self.masks:
# cropped.append(mask.crop(box))
# return SegmentationMask(cropped, size=(w, h), mode=self.mode)
#
# def resize(self, size, *args, **kwargs):
# scaled = []
# for mask in self.masks:
# scaled.append(mask.resize(size, *args, **kwargs))
# return SegmentationMask(scaled, size=size, mode=self.mode)
#
# def to(self, *args, **kwargs):
# return self
#
# def __getitem__(self, item):
# if isinstance(item, (int, slice)):
# selected_masks = [self.masks[item]]
# else:
# # advanced indexing on a single dimension
# selected_masks = []
# if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
# item = item.nonzero()
# item = item.squeeze(1) if item.numel() > 0 else item
# item = item.tolist()
# for i in item:
# selected_masks.append(self.masks[i])
# return SegmentationMask(selected_masks, size=self.size, mode=self.mode)
#
# def __iter__(self):
# return iter(self.masks)
#
# def __repr__(self):
# s = self.__class__.__name__ + "("
# s += "num_instances={}, ".format(len(self.masks))
# s += "image_width={}, ".format(self.size[0])
# s += "image_height={})".format(self.size[1])
# return s
def annList2maskList(annList, box=False, color=False):
    """Stack annotation masks into an (h, w, n) array plus category ids.

    Each annotation provides either a dense "mask" array or a COCO RLE
    "segmentation".  Returns {"mask": None} for an empty list (kept for
    backward compatibility with existing callers).

    Bug fix: the bare ``except:`` is narrowed to the exceptions the
    fallback is meant to handle (missing "mask" key / non-array mask).
    """
    n_anns = len(annList)
    if n_anns == 0:
        return {"mask": None}
    ann = annList[0]
    # Prefer the dense mask's shape; fall back to height/width fields.
    try:
        h, w = ann["mask"].shape
    except (KeyError, AttributeError):
        h, w = ann["height"], ann["width"]
    maskList = np.zeros((h, w, n_anns), int)
    categoryList = np.zeros(n_anns, int)
    for i in range(n_anns):
        ann = annList[i]
        if "mask" in ann:
            ann_mask = ann["mask"]
        else:
            ann_mask = maskUtils.decode(ann["segmentation"])
        assert ann_mask.max() <= 1
        maskList[:, :, i] = ann_mask
        categoryList[i] = ann["category_id"]
    return {"maskList": maskList, "categoryList": categoryList}
def batch2annList(batch):
    """Build a ground-truth annList from a batch's instance/class masks.

    Each instance id in ``maskObjects`` becomes one RLE annotation whose
    category is read from ``maskClasses`` (exactly one non-background
    class per instance is asserted).
    """
    annList = []
    # Image id is derived from the (numeric) name string.
    image_id = int(batch["name"][0].replace("_", ""))
    # image_id = batch["image_id"][0]
    height, width = batch["images"].shape[-2:]
    maskObjects = batch["maskObjects"]
    maskClasses = batch["maskClasses"]
    # 255 marks void pixels; instance ids run 1..n_objects.
    n_objects = maskObjects[maskObjects != 255].max()
    object_uniques = np.unique(maskObjects)
    object_uniques = object_uniques[object_uniques != 0]
    id = 1
    for obj_id in range(1, n_objects + 1):
        if obj_id == 0:
            continue
        binmask = (maskObjects == obj_id)
        segmentation = maskUtils.encode(
            np.asfortranarray(ut.t2n(binmask).squeeze()))
        segmentation["counts"] = segmentation["counts"].decode("utf-8")
        # The instance must map to exactly one non-background class.
        uniques = (binmask.long() * maskClasses).unique()
        uniques = uniques[uniques != 0]
        assert len(uniques) == 1
        category_id = uniques[0].item()
        annList += [{
            "segmentation": segmentation,
            "iscrowd": 0,
            # "bbox":maskUtils.toBbox(segmentation).tolist(),
            "area": int(maskUtils.area(segmentation)),
            "id": id,
            "height": height,
            "width": width,
            "image_id": image_id,
            "category_id": category_id
        }]
        id += 1
    return annList
def test(model, val_set, metric="bbox"):
    """Placeholder evaluation entry point (not implemented)."""
    pass
@torch.no_grad()
def validate(model, val_set, method="annList_box"):
    """Run the model over the whole validation set and score the annLists.

    NOTE(review): ``gt_annList`` is never filled here, so the comparison
    must rely on ``compare_annList`` pulling ground truth from
    ``val_set`` -- confirm.  Also note this function is redefined later
    in this module, which shadows this definition.
    """
    n_batches = len(val_set)
    pred_annList = []
    gt_annList = []
    for i in range(n_batches):
        batch = ut.get_batch(val_set, [i])
        print(i, "/", n_batches)
        pred_dict = model.predict(batch, method=method)
        # Sanity check: never evaluate on a batch the model trained on.
        assert batch["name"][0] not in model.trained_batch_names
        pred_annList += pred_dict["annList"]
    results = compare_annList(gt_annList, pred_annList, val_set)
    results_dict = results["result_dict"]
    return results_dict
@torch.no_grad()
def valBatch(model, batch, method="annList_box"):
    """Score the model's predictions on a single batch against its GT."""
    pred_annList = list(model.predict(batch, method=method))
    gt_annList = list(batch["annList"])
    return compare_annList(gt_annList, pred_annList)
def pred2annList(boxes_yxyx, scoreList, categoryList, batch, mask=None):
    """Normalized yxyx boxes + scores/categories -> annList for a batch."""
    image_shape = batch["images"].shape
    _, _, h, w = image_shape
    # Scale normalized boxes to pixels, then convert to COCO [x, y, w, h].
    denorm = bbox_yxyx_denormalize(boxes_yxyx, image_shape)
    boxes_xywh = ut.t2n(yxyx2xywh(denorm))
    image_id = batch["meta"]["image_id"][0]
    return [{
        "bbox": list(map(int, box)),
        "image_id": image_id,
        "category_id": int(category),
        "height": h,
        "width": w,
        "score": float(score)
    } for box, score, category in zip(boxes_xywh, scoreList, categoryList)]
def segm2annList(segm,
                 boxes,
                 scoreList,
                 categoryList,
                 H,
                 W,
                 image_id,
                 mode="yxyx",
                 mask=None,
                 score_threshold=None):
    """Build a segmentation annList from RLEs plus matching boxes/scores.

    Boxes whose coordinates are all < 1 are treated as normalized and
    scaled to the (H, W) image first.  Entries scoring below
    ``score_threshold`` (when given) are dropped.
    """
    if len(boxes) == 0:
        return []
    # Normalized coordinates -> pixel coordinates.
    if boxes.max() < 1:
        boxes = bbox_yxyx_denormalize(boxes, (1, 3, H, W))
    convert = yxyx2xywh if mode == "yxyx" else xyxy2xywh
    boxes_xywh = ut.t2n(convert(boxes))
    annList = []
    for idx, box in enumerate(boxes_xywh):
        score = float(scoreList[idx])
        if score_threshold is not None and score < score_threshold:
            continue
        annList.append({
            "segmentation": segm[idx],
            "bbox": list(map(int, box)),
            "image_id": image_id,
            "category_id": int(categoryList[idx]),
            "height": H,
            "width": W,
            "score": score
        })
    return annList
def bbox2annList(boxes,
                 scoreList,
                 categoryList,
                 H,
                 W,
                 image_id,
                 mode="yxyx",
                 mask=None):
    """Build a box-only annList from boxes plus scores and categories.

    Boxes whose coordinates are all < 1 are treated as normalized and
    scaled to the (H, W) image first.
    """
    if len(boxes) == 0:
        return []
    # Normalized coordinates -> pixel coordinates.
    if boxes.max() < 1:
        boxes = bbox_yxyx_denormalize(boxes, (1, 3, H, W))
    convert = yxyx2xywh if mode == "yxyx" else xyxy2xywh
    boxes_xywh = ut.t2n(convert(boxes))
    return [{
        "bbox": list(map(int, box)),
        "image_id": image_id,
        "category_id": int(categoryList[idx]),
        "height": H,
        "width": W,
        "score": float(scoreList[idx])
    } for idx, box in enumerate(boxes_xywh)]
# def annList2annDict(annList, type="instances"):
# # type = "instances or bbox"
# if isinstance(annList[0], list):
# annList = annList[0]
# annDict = {}
# annDict["categories"] = [{"id":category_id} for category_id in
# np.unique([a["category_id"] for a in annList])]
# try:
# annDict["images"] = [{"file_name":a["image_id"],
# "id":a["image_id"],
# "width":a["segmentation"]["size"][1],
# "height":a["segmentation"]["size"][0]} for a in annList]
# except:
# annDict["images"] = [{"file_name":a["image_id"],
# "id":a["image_id"],
# "width":a["width"],
# "height":a["height"]} for a in annList]
# annDict["type"] = type
# if "id" not in annList[0]:
# for i, ann in enumerate(annList):
# ann["id"] = i
# annDict["annotations"] = annList
# return annDict
def bbox2mask(bbox, image_shape, window_box=None, mode="yxyx"):
    """Rasterize box outlines into an (h, w) binary mask."""
    _, _, h, w = image_shape
    # Normalized boxes are first scaled to pixel coordinates.
    if bbox.max() <= 1.:
        bbox = ut.t2n(
            bbox_yxyx_denormalize(bbox.cpu(), image_shape, window_box))
    mask = np.zeros((h, w), int)
    for box in bbox:
        if mode == "xyxy":
            x1, y1, x2, y2 = map(int, box)
        else:
            y1, x1, y2, x2 = map(int, box)
        # Draw the four edges of the rectangle.
        mask[y1:y2, x1] = 1
        mask[y1:y2, x2] = 1
        mask[y1, x1:x2] = 1
        mask[y2, x1:x2] = 1
    return mask
def clamp_boxes_yxyx(boxes, image_shape):
    """Clamp yxyx box coordinates into valid pixel range, in place.

    Columns 0/2 (y) are limited to [0, H-1]; columns 1/3 (x) to [0, W-1].
    Returns the (mutated) boxes tensor.
    """
    _, _, H, W = image_shape
    for col, limit in ((0, H - 1), (2, H - 1), (1, W - 1), (3, W - 1)):
        boxes[:, col] = boxes[:, col].clamp(0, limit)
    return boxes
def apply_delta_on_bbox(boxes, deltas):
    """Refine yxyx boxes by regression deltas.

    boxes: [N, 4] rows of (y1, x1, y2, x2)
    deltas: [N, 4] rows of (dy, dx, log dh, log dw)
    Returns the shifted/scaled boxes in yxyx order.
    """
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    # Shift the centers by a fraction of the current size.
    cy = boxes[:, 0] + 0.5 * h + deltas[:, 0] * h
    cx = boxes[:, 1] + 0.5 * w + deltas[:, 1] * w
    # Rescale height/width in log space.
    h = h * torch.exp(deltas[:, 2])
    w = w * torch.exp(deltas[:, 3])
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    return torch.stack([y1, x1, y1 + h, x1 + w], dim=1)
def compute_bbox_delta(b1_yxyx, b2_yxyx):
    """Inverse of ``apply_delta_on_bbox``: deltas mapping boxes b2 onto b1.

    Both inputs are [N, 4] yxyx boxes; the result rows are
    (dy, dx, log dh, log dw).
    """
    d1 = bbox_yxyx_dict(ut.t2n(b1_yxyx))
    d2 = bbox_yxyx_dict(ut.t2n(b2_yxyx))
    dy = (d1["yc"] - d2["yc"]) / d2["h"]
    dx = (d1["xc"] - d2["xc"]) / d2["w"]
    dh = np.log(d1["h"] / d2["h"])
    dw = np.log(d1["w"] / d2["w"])
    return torch.FloatTensor([dy, dx, dh, dw]).t()
def compute_overlaps_yxyx(boxes1, boxes2):
    """Pairwise IoU matrix of shape [len(boxes1), len(boxes2)].

    Boxes are [N, (y1, x1, y2, x2)].  For better performance, pass the
    larger set first and the smaller second.
    """
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
    # Fill one column at a time: each boxes2 entry against all of boxes1.
    for j in range(boxes2.shape[0]):
        overlaps[:, j] = compute_iou_yxyx(boxes2[j], boxes1, area2[j], area1)
    return overlaps
def compute_iou_yxyx(box, boxes, box_area, boxes_area):
    """IoU of one yxyx box against an array of yxyx boxes.

    Areas are precomputed by the caller so repeated calls avoid
    duplicate work.
    """
    # Per-axis overlap extents (negative when disjoint on that axis).
    inter_y = np.minimum(box[2], boxes[:, 2]) - np.maximum(box[0], boxes[:, 0])
    inter_x = np.minimum(box[3], boxes[:, 3]) - np.maximum(box[1], boxes[:, 1])
    intersection = np.maximum(inter_y, 0) * np.maximum(inter_x, 0)
    union = box_area + boxes_area - intersection
    return intersection / union
def yxyx2xywh(boxes_yxyx):
    """[y1, x1, y2, x2] -> [x, y, w, h], column-wise on a 2-D tensor."""
    y1 = boxes_yxyx[:, 0:1]
    x1 = boxes_yxyx[:, 1:2]
    y2 = boxes_yxyx[:, 2:3]
    x2 = boxes_yxyx[:, 3:4]
    return torch.cat([x1, y1, x2 - x1, y2 - y1], dim=1)
def xyxy2xywh(boxes_xyxy):
    """[x1, y1, x2, y2] -> [x, y, w, h], column-wise on a 2-D tensor."""
    x1 = boxes_xyxy[:, 0:1]
    y1 = boxes_xyxy[:, 1:2]
    x2 = boxes_xyxy[:, 2:3]
    y2 = boxes_xyxy[:, 3:4]
    return torch.cat([x1, y1, x2 - x1, y2 - y1], dim=1)
def bbox_yxyx_normalize(bbox, image_shape):
    """Scale pixel yxyx boxes into [0, 1] by the image height/width."""
    _, _, H, W = image_shape
    return bbox / torch.FloatTensor([H, W, H, W])
def bbox_yxyx_denormalize(bbox, image_shape, window_box=None, clamp=True):
    """Map normalized yxyx boxes into pixel coordinates of ``window_box``.

    ``window_box`` (yxyx) defaults to the whole image; when ``clamp`` is
    True the result is clipped to valid pixel indices.
    """
    _, _, H, W = image_shape
    if window_box is None:
        y0, x0, y1, x1 = 0, 0, H, W
    else:
        y0, x0, y1, x1 = map(int, window_box)
    win_h = y1 - y0
    win_w = x1 - x0
    scales = torch.FloatTensor([win_h, win_w, win_h, win_w])
    shift = torch.FloatTensor([y0, x0, y0, x0])
    # Translate bounding boxes into the image domain.
    bbox = bbox * scales + shift
    if clamp:
        bbox = clamp_boxes_yxyx(bbox, image_shape)
    return bbox
def bbox_yxyx_dict(bbox):
    """Decompose yxyx boxes into centers and sizes: keys xc, yc, h, w."""
    heights = bbox[:, 2] - bbox[:, 0]
    widths = bbox[:, 3] - bbox[:, 1]
    return {
        "xc": bbox[:, 1] + 0.5 * widths,
        "yc": bbox[:, 0] + 0.5 * heights,
        "h": heights,
        "w": widths,
    }
def bbox_xywh_dict(bbox, H, W):
    """Placeholder for the xywh analogue of bbox_yxyx_dict (not implemented)."""
    pass
def bbox_yxyx_shape2shape(bbox, shape1, shape2):
    """Rescale pixel yxyx boxes from image shape1 into image shape2."""
    as_tensor = ut.n2t(bbox).float()
    normalized = bbox_yxyx_normalize(as_tensor, shape1)
    return bbox_yxyx_denormalize(normalized, shape2)
def annList2bbox(annList, mode="yxyx"):
    """Collect normalized boxes, categories and areas from an annList.

    Each annotation supplies either explicit "width"/"height"/"bbox"
    fields (bbox as an array-like with .flatten()) or, failing that, a
    COCO RLE "segmentation" from which the box is derived.  Returns
    normalized yxyx and yxhw box tensors plus category ids and pixel
    areas.

    Bug fix: the bare ``except:`` is narrowed to the exceptions the
    fallback path is meant to handle (missing keys / list-typed bbox).
    """
    n_objs = len(annList)
    bbox_yxyx = torch.zeros((n_objs, 4))
    bbox_yxhw = torch.zeros((n_objs, 4))
    gt_category_ids = torch.zeros((n_objs))
    seg_areas = torch.zeros((n_objs))
    # Load object bounding boxes into a data frame.
    for i, ann in enumerate(annList):
        try:
            W, H = ann["width"], ann["height"]
            x, y, w, h = ann["bbox"].flatten()
        except (KeyError, AttributeError):
            # No usable "bbox" field: fall back to the RLE segmentation.
            H, W = ann["segmentation"]["size"]
            x, y, w, h = maskUtils.toBbox(ann["segmentation"])
        x1 = x
        y1 = y
        # Clip the far corner so boxes stay inside the image.
        x2 = min(x + w, W - 1)
        y2 = min(y + h, H - 1)
        bbox_yxyx[i] = torch.FloatTensor((y1 / H, x1 / W, y2 / H, x2 / W))
        bbox_yxhw[i] = torch.FloatTensor((x / W, y / H, w / W, h / H))
        seg_areas[i] = h * w
        gt_category_ids[i] = ann["category_id"]
    return {
        'bbox_yxyx': bbox_yxyx,
        'bbox_yxhw': bbox_yxhw,
        'category_ids': gt_category_ids,
        'seg_areas': seg_areas
    }
def ann2poly(ann):
    """Approximate an annotation's mask by polygon contours.

    Returns a list of flattened [x0, y0, x1, y1, ...] integer arrays,
    one per contour found at the 0.5 level.
    """
    mask = ann2mask(ann)["mask"]
    polyList = []
    for contour in measure.find_contours(mask, 0.5):
        # find_contours yields (row, col) pairs; flip to (x, y) order.
        xy = np.flip(contour, axis=1)
        polyList.append(xy.ravel().astype(int))
    return polyList
def poly2mask(poly, bbox_new):
    # NOTE(review): this function looks broken/unfinished -- ``frPoly`` is
    # referenced but never called (pycocotools exposes ``frPyObjects``),
    # ``mo`` is unused, and ``polyList`` is undefined, so calling this
    # raises NameError.  Left untouched pending confirmation of intent.
    Rs = maskUtils.frPoly
    mo = maskUtils.decode(Rs)
    return polyList
def load_annList(exp_dict, predict_method, reset=None):
    """Load (or compute and cache) the predicted annList for a dataset.

    Predictions are derived from cached LCFCN points with either the
    "BestObjectness" or "UpperBound" method, then stored as JSON; pass
    reset="reset" to force recomputation.
    """
    dataset_name = exp_dict["dataset_name"]
    base = "/mnt/projects/counting/Saves/main/"
    fname = base + "lcfcn_points/{}_{}_annList.json".format(
        dataset_name, predict_method)
    if os.path.exists(fname) and reset != "reset":
        return ut.load_json(fname)
    else:
        _, val_set = load_trainval(exp_dict)
        loader = data.DataLoader(
            val_set, batch_size=1, num_workers=0, drop_last=False)
        pointDict = load_LCFCNPoints(exp_dict)
        annList = []
        for i, batch in enumerate(loader):
            print(i, "/", len(loader), " - annList")
            pointList = pointDict[batch["name"][0]]
            # Images with no detected points contribute no annotations.
            if len(pointList) == 0:
                continue
            if predict_method == "BestObjectness":
                pred_dict = pointList2BestObjectness(pointList, batch)
            elif predict_method == "UpperBound":
                pred_dict = pointList2UpperBound(pointList, batch)
            annList += pred_dict["annList"]
        ut.save_json(fname, annList)
        return annList
def load_BestObjectness(exp_dict, reset=None):
    """Cached BestObjectness predictions for the experiment."""
    return load_annList(exp_dict, "BestObjectness", reset=reset)
def load_UpperBound(exp_dict, reset=None):
    """Cached UpperBound predictions for the experiment."""
    return load_annList(exp_dict, "UpperBound", reset=reset)
def get_perSizeResults(gt_annDict, pred_annList):
    """COCO segmentation evaluation at IoU thresholds 0.25/0.5/0.75.

    Returns the full summary plus a {threshold: all-areas score} dict.
    NOTE(review): relies on a patched COCOeval whose ``summarize()``
    returns a dict keyed like "0.25_all" -- stock pycocotools returns
    None; confirm which COCOeval is imported.
    """
    cocoGt = pycocotools.coco.COCO(gt_annDict)
    # pred_annList2 = []
    cocoDt = cocoGt.loadRes(pred_annList)
    cocoEval = COCOeval(cocoGt, cocoDt, "segm")
    cocoEval.params.iouThrs = np.array([.25, .5, .75])
    cocoEval.evaluate()
    cocoEval.accumulate()
    results = cocoEval.summarize()
    result_dict = {}
    for i in ["0.25", "0.5", "0.75"]:
        score = results["{}_all".format(i)]
        result_dict[i] = score
    return {"results": results, "result_dict": result_dict}
def get_perCategoryResults(gt_annDict, pred_annDict):
    """Per-category COCO segm AP at IoU 0.5.

    Evaluates each category id separately and returns {category_id: score}.
    NOTE(review): like get_perSizeResults, this assumes a patched
    COCOeval whose ``summarize()`` returns a dict.
    """
    cocoGt = pycocotools.coco.COCO(gt_annDict)
    cocoDt = cocoGt.loadRes(pred_annDict)
    cocoEval = COCOeval(cocoGt, cocoDt, "segm")
    results = {}
    for i in cocoEval.params.catIds:
        # Fresh evaluator per category; restrict to that category only.
        cocoEval = COCOeval(cocoGt, cocoDt, "segm")
        cocoEval.params.iouThrs = np.array([.5])
        cocoEval.params.catIds = [i]
        cocoEval.params.areaRngLbl = ["all"]
        cocoEval.evaluate()
        cocoEval.accumulate()
        stat = list(cocoEval.summarize().values())
        assert len(stat) == 1
        results[i] = stat[0]
    return results
def get_image_ids(pred_annList):
    """Unique image ids appearing in the annList (order unspecified)."""
    return list({ann["image_id"] for ann in pred_annList})
# def pred_for_coco2014(exp_dict, pred_annList):
# if exp_dict["dataset_name"] == "CocoDetection2014":
# train_set,_ = ut.load_trainval(exp_dict)
# for p in pred_annList:
# p["image_id"] = int(p["image_id"])
# p["category_id"] = train_set.label2category[p["category_id"]]
# return pred_annList
def test_baselines(exp_dict, reset=None):
    """Evaluate the BestObjectness and UpperBound baselines.

    Results are cached at ``exp_dict["path_baselines"]``; pass
    reset="reset" to recompute them.

    Bug fix: the original unconditionally computed the UpperBound
    annList before the cache check and discarded the result, making the
    cached fast-path pay the full prediction cost.
    """
    if os.path.exists(exp_dict["path_baselines"]) and reset != "reset":
        return ut.load_pkl(exp_dict["path_baselines"])
    gt_annDict = load_gtAnnDict(exp_dict)
    #### Best Objectness
    pred_annList = load_BestObjectness(exp_dict, reset=reset)
    # idList1 = get_image_ids(pred_annList)
    # idList2 = get_image_ids(gt_annDict["annotations"])
    results = get_perSizeResults(gt_annDict, pred_annList)
    result_dict = results["result_dict"]
    result_dict["predict_method"] = "BestObjectness"
    result_list = [result_dict]
    #### Upper bound
    pred_annList = load_UpperBound(exp_dict, reset=reset)
    results = get_perSizeResults(gt_annDict, pred_annList)
    result_dict = results["result_dict"]
    result_dict["predict_method"] = "UpperBound"
    result_list += [result_dict]
    ut.save_pkl(exp_dict["path_baselines"], result_list)
    print(pd.DataFrame(result_list))
    return result_list
def validate(model, dataset, predict_method, n_val=None, return_annList=False):
    """Evaluate ``model`` on ``dataset`` with the given predict method.

    NOTE(review): this redefines (and shadows) the earlier ``validate``
    defined above in this module -- confirm which one callers expect.
    """
    pred_annList = dataset2annList(
        model, dataset, predict_method=predict_method, n_val=n_val)
    # Ground truth is keyed by the dataset class name.
    gt_annDict = load_gtAnnDict({"dataset_name": type(dataset).__name__})
    results = get_perSizeResults(gt_annDict, pred_annList)
    result_dict = results["result_dict"]
    result_dict["predict_method"] = predict_method
    if return_annList:
        return result_dict, pred_annList
    return result_dict
def test_best(exp_dict, reset=None):
    """Evaluate the best saved model (BestDice) and print it alongside
    the baseline results.

    Bug fix: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``.
    """
    _, val_set = load_trainval(exp_dict)
    history = ut.load_history(exp_dict)
    # if reset == "reset":
    try:
        pred_annList = ut.load_best_annList(exp_dict)
    except Exception:
        # Cached annList missing or unreadable: recompute from the model.
        model = ut.load_best_model(exp_dict)
        pred_annList = dataset2annList(
            model, val_set, predict_method="BestDice", n_val=None)
        ut.save_pkl(exp_dict["path_best_annList"], pred_annList)
    # else:
    #     pred_annList = ut.load_best_annList(exp_dict)
    gt_annDict = load_gtAnnDict(exp_dict)
    results = get_perSizeResults(gt_annDict, pred_annList)
    result_dict = results["result_dict"]
    result_dict["predict_method"] = "BestDice"
    result_dict["epoch"] = history["best_model"]["epoch"]
    result_list = test_baselines(exp_dict)
    result_list += [result_dict]
    print(pd.DataFrame(result_list))
def get_random_indices(mask, n_indices=10):
    """Sample ``n_indices`` random foreground pixels (with replacement).

    Returns {"yList": rows, "xList": cols} of the chosen positions.
    """
    nz = np.where(mask.squeeze())
    chosen = np.random.randint(0, nz[0].shape[0], n_indices)
    return {"yList": nz[0][chosen], "xList": nz[1][chosen]}
def propDict2seedList(propDict, n_neighbors=100, random_proposal=False):
seedList = []
for prop in propDict["propDict"]:
if len(prop["annList"]) == 0:
seedList += [{
"category_id": [prop["point"]["category_id"]],
"yList": [prop["point"]["y"]],
"xList": [prop["point"]["x"]],
"neigh": {
"yList": [prop["point"]["y"]],
"xList": [prop["point"]["x"]]
}
}]
else:
if random_proposal:
i = np.random.randint(0, len(prop["annList"]))
mask = prop["annList"][i]["mask"]
else:
mask = prop["annList"][0]["mask"]
seedList += [{
"category_id": [prop["point"]["category_id"]],
"yList": [prop["point"]["y"]],
"xList": [prop["point"]["x"]],
"neigh": get_random_indices(mask, n_indices=100)
}]
# Background
background = propDict["background"]
if background.sum() == 0:
y_axis = np.random.randint(0, background.shape[1], 100)
x_axis = | np.random.randint(0, background.shape[2], 100) | numpy.random.randint |
import h5py
import numpy as np
from scipy.io import loadmat
from operator import itemgetter
import math
import scipy as sp
import cv2
import matplotlib.pyplot as plt
import os, sys
import time
import multiprocessing
import random
# Generate Observation Map
def func(theta, m, I, imax, L, w, N, anglemask):
    """Build one rotated observation map (plus mask and normals) for theta.

    Light directions ``L`` and normals ``N`` are rotated in the xy-plane
    by ``theta``; each light is projected onto a w x w grid and the
    max-normalized intensities ``I`` are scattered into that grid.
    Returns (embed, mask, normal, rotmat) with embed/mask of shape
    (m, w, w).

    NOTE(review): assumes L/N x,y components lie in [-1, 1] and I is
    (m, num_lights) -- confirm against the caller.
    """
    print('*',end='')
    rotmat = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
    p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
    q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
    # Rotate light positions about the grid center.
    x = [p-0.5*(w-1), q-0.5*(w-1)]
    x_ = np.dot(rotmat, x)
    p = x_[0,:]+0.5*(w-1);
    q = x_[1,:]+0.5*(w-1);
    p = np.int32(p)
    q = np.int32(q)
    light_idx = q*w + p # 0:w*w-1
    # Rotate the normals' x/y components by the same matrix.
    x = [N[:,0], N[:,1]]
    x_ = np.dot(rotmat, x)
    pn = x_[0,:];
    qn = x_[1,:];
    normal = [np.transpose(pn), np.transpose(qn), N[:,2]]
    normal = np.transpose(normal)
    # Normalize intensities by each pixel's max and apply the angle mask.
    temp = I*anglemask/np.transpose(imax)
    embed = np.zeros((m, w*w), np.float32)
    embed[:, light_idx] = temp
    embed = np.reshape(embed, (m, w, w))
    mask = np.zeros((m, w*w), np.bool_)
    mask[:, light_idx] = anglemask
    mask = np.reshape(mask, (m, w, w))
    return embed, mask, normal, rotmat
def wrapper(args):
    """Unpack a single argument tuple for ``func`` (Pool.map helper)."""
    return func(*args)
# for multi core cpu
def light_embedding_2d_rot_invariant_multi(I, imax, L, w, N, div, isRandomThresh):
    """Compute ``div`` rotated observation maps per pixel on all CPU cores.

    For each pixel an angular visibility mask is built (with a randomized
    elevation threshold and light subset when ``isRandomThresh``), then
    ``func`` is evaluated at ``div`` evenly spaced rotations via a
    multiprocessing Pool.  Returns (embed, mask, normals, rotations,
    rows, cols) with embed of shape (m, div, w, w).
    """
    m = I.shape[0]
    rows = w
    cols = w
    embed_rot = []
    normal_rot = []
    mask_rot = []
    rot = []
    anglemask = np.zeros((I.shape[0],I.shape[1]),np.float32)
    for k in range(I.shape[0]): # numpixel
        # Elevation angle of each light in degrees from the z-axis.
        angle1 = 180*np.arccos(L[:,2])/np.pi
        if isRandomThresh == True:
            # Random elevation cutoff plus a random subset of lights
            # (acts as data augmentation).
            tgt = np.where(angle1<random.randint(20,90))
            tgtrandom = np.random.permutation(tgt[0])
            tgt = tgtrandom[:random.randint(50,np.min([1000,L.shape[0]]))]
        else:
            tgt = np.where(angle1<90)
        anglemask[k,tgt] = 1
    n = multiprocessing.cpu_count()
    p = multiprocessing.Pool(n)
    params = [(np.pi*(i*360.0/div)/180, m, I, imax, L, w, N, anglemask) for i in range(np.int32(div))]
    result = p.map(wrapper, params)
    p.close()
    embed_list = []
    mask_list = []
    nml_list = []
    rot_list = []
    for i in range(div):
        embed_list.append(result[i][0].copy())
        mask_list.append(result[i][1].copy())
        nml_list.append(result[i][2].copy())
        rot_list.append(result[i][3].copy())
    # Move the rotation axis after the pixel axis: (m, div, ...).
    embed_list = np.array(embed_list)
    embed_list = np.transpose(embed_list, (1,0,2,3))
    mask_list = np.array(mask_list)
    mask_list = np.transpose(mask_list, (1,0,2,3))
    nml_list = np.array(nml_list)
    nml_list = np.transpose(nml_list, (1,0,2))
    del result,anglemask
    return np.array(embed_list), np.array(mask_list), np.array(nml_list), np.array(rot_list), rows, cols
# for single core cpu
def light_embedding_2d_rot_invariant(I, imax, L, w, N, div, isRandomThresh):
m = I.shape[0]
embed_rot = []
normal_rot = []
mask_rot = []
rot = []
count = 0
anglemask = np.zeros((I.shape[0],I.shape[1]),np.float32)
for k in range(I.shape[0]):
angle1 = 180*np.arccos(L[:,2])/np.pi
if isRandomThresh == True:
tgt = np.where(angle1<random.randint(20,90))
tgtrandom = np.random.permutation(tgt[0])
tgt = tgtrandom[:random.randint(50,np.min([1000,L.shape[0]]))]
else:
tgt = np.where(angle1<90)
anglemask[k,tgt] = 1
for k in range(div):
theta = k*360/div
if theta < 360:
count = count + 1
theta = np.pi*theta/180
rotmat = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
x = [p-0.5*(w-1), q-0.5*(w-1)]
x_ = np.dot(rotmat, x)
p = x_[0,:]+0.5*(w-1);
q = x_[1,:]+0.5*(w-1);
p = np.int32(p)
q = np.int32(q)
light_idx = q*w + p # 0:w*w-1
x = [N[:,0], N[:,1]]
x_ = np.dot(rotmat, x)
pn = x_[0,:];
qn = x_[1,:];
normal = [ | np.transpose(pn) | numpy.transpose |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 15:34:18 2020
@author: mubariz
"""
import caffe
import numpy as np
import os
def compute_map_features(ref_map):
mean_npy = np.load(str(os.path.abspath(os.curdir))+'/VPR_Techniques/HybridNet/hybridnet_mean.npy')
print('Mean Array Shape:' + str(mean_npy.shape))
net = caffe.Net(str(os.path.abspath(os.curdir))+'/VPR_Techniques/HybridNet/deploy.prototxt',str(os.path.abspath(os.curdir))+'/VPR_Techniques/HybridNet/HybridNet.caffemodel', caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
print(net.blobs['data'].data.shape)
transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension
transformer.set_mean('data', mean_npy) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR
ref_features=[]
features_ref_local=np.zeros((256,30))
for image_reference in ref_map:
image_reference = image_reference / 255.
image_reference = image_reference[:,:,(2,1,0)]
features_ref_local=np.zeros((256,30))
if(image_reference is not None):
transformed_image_ref = transformer.preprocess('data', image_reference)
net.blobs['data'].data[...] = transformed_image_ref.copy()
out = net.forward()
features_ref=np.asarray(net.blobs['conv5'].data)[1,:,:,:].copy()
for i in range(256):
#S=1
features_ref_local[i,0]=np.max(features_ref[i,:,:])
#S=2
features_ref_local[i,1]= | np.max(features_ref[i,0:6,0:6]) | numpy.max |
import os
import random
import argparse
import logging
import json
import time
import multiprocessing as mp
import scipy.sparse as ssp
from tqdm import tqdm
import networkx as nx
import torch
import numpy as np
import dgl
#os.environ["CUDA_VISIBLE_DEVICES"]="1"
def process_files(files, saved_relation2id, add_traspose_rels):
    '''
    files: Dictionary map of file paths to read the triplets from.
    saved_relation2id: Saved relation2id (mostly passed from a trained model) which can be used to map relations to pre-defined indices and filter out the unknown ones.
    add_traspose_rels: If True, transposed adjacency matrices are appended so both edge directions are represented.

    Returns (adj_list, dgl_adj_list, triplets, entity2id, relation2id, id2entity, id2relation).
    Entities are assigned ids in order of first appearance across all files;
    triplets with relations unknown to saved_relation2id are dropped.
    '''
    entity2id = {}
    relation2id = saved_relation2id
    triplets = {}
    ent = 0  # next free entity id (the unused `rel` counter was removed)
    for file_type, file_path in files.items():
        data = []
        with open(file_path) as f:
            # Each line is "head relation tail"; the trailing empty split is dropped.
            file_data = [line.split() for line in f.read().split('\n')[:-1]]
            for triplet in file_data:
                if triplet[0] not in entity2id:
                    entity2id[triplet[0]] = ent
                    ent += 1
                if triplet[2] not in entity2id:
                    entity2id[triplet[2]] = ent
                    ent += 1
                # Save the triplets corresponding to only the known relations
                if triplet[1] in saved_relation2id:
                    data.append([entity2id[triplet[0]], entity2id[triplet[2]], saved_relation2id[triplet[1]]])
        triplets[file_type] = np.array(data)
    id2entity = {v: k for k, v in entity2id.items()}
    id2relation = {v: k for k, v in relation2id.items()}
    # Construct the list of adjacency matrices, one per relation.
    # Note that this is constructed only from the 'graph' split.
    adj_list = []
    for i in range(len(saved_relation2id)):
        idx = np.argwhere(triplets['graph'][:, 2] == i)
        adj_list.append(ssp.csc_matrix((np.ones(len(idx), dtype=np.uint8), (triplets['graph'][:, 0][idx].squeeze(1), triplets['graph'][:, 1][idx].squeeze(1))), shape=(len(entity2id), len(entity2id))))
    # Add transpose matrices to handle both directions of relations.
    adj_list_aug = adj_list
    if add_traspose_rels:
        adj_list_t = [adj.T for adj in adj_list]
        adj_list_aug = adj_list + adj_list_t
    dgl_adj_list = ssp_multigraph_to_dgl(adj_list_aug)
    return adj_list, dgl_adj_list, triplets, entity2id, relation2id, id2entity, id2relation
def intialize_worker(model, adj_list, dgl_adj_list, id2entity, params, node_features, kge_entity2id):
    """Stash shared state in module-level globals for multiprocessing workers.

    (The misspelled name `intialize_worker` is kept — it is the public API.)
    """
    global model_, adj_list_, dgl_adj_list_, id2entity_, params_, node_features_, kge_entity2id_
    model_ = model
    adj_list_ = adj_list
    dgl_adj_list_ = dgl_adj_list
    id2entity_ = id2entity
    params_ = params
    node_features_ = node_features
    kge_entity2id_ = kge_entity2id
def get_neg_samples_replacing_head_tail(test_links, adj_list, num_samples=50):
    """For each test link, sample corrupted triplets by replacing the tail
    (for the 'head' block) or the head (for the 'tail' block).

    The true triplet is always the first row; candidates equal to the kept
    endpoint or already present in the relation's adjacency are rejected.
    """
    num_nodes = adj_list[0].shape[0]
    samples = []
    for link in test_links:
        head, tail, rel = link[0], link[1], link[2]
        candidate = {'head': [[], 0], 'tail': [[], 0]}

        head_side = candidate['head'][0]
        head_side.append([head, tail, rel])
        while len(head_side) < num_samples:
            cand_tail = np.random.choice(num_nodes)
            if head != cand_tail and adj_list[rel][head, cand_tail] == 0:
                head_side.append([head, cand_tail, rel])

        tail_side = candidate['tail'][0]
        tail_side.append([head, tail, rel])
        while len(tail_side) < num_samples:
            cand_head = np.random.choice(num_nodes)
            if cand_head != tail and adj_list[rel][cand_head, tail] == 0:
                tail_side.append([cand_head, tail, rel])

        candidate['head'][0] = np.array(head_side)
        candidate['tail'][0] = np.array(tail_side)
        samples.append(candidate)
    return samples
def get_neg_samples_replacing_head_tail_all(test_links, adj_list):
    """Enumerate every valid head/tail corruption for each test link.

    Like get_neg_samples_replacing_head_tail, but exhaustively scans all
    nodes instead of sampling; the true triplet stays the first row.
    """
    num_nodes = adj_list[0].shape[0]
    neg_triplets = []
    print('sampling negative triplets...')
    for head, tail, rel in tqdm(zip(test_links[:, 0], test_links[:, 1], test_links[:, 2]),
                                total=len(test_links)):
        entry = {'head': [[], 0], 'tail': [[], 0]}

        head_side = entry['head'][0]
        head_side.append([head, tail, rel])
        for cand in range(num_nodes):
            if cand != head and adj_list[rel][head, cand] == 0:
                head_side.append([head, cand, rel])

        tail_side = entry['tail'][0]
        tail_side.append([head, tail, rel])
        for cand in range(num_nodes):
            if cand != tail and adj_list[rel][cand, tail] == 0:
                tail_side.append([cand, tail, rel])

        entry['head'][0] = np.array(head_side)
        entry['tail'][0] = np.array(tail_side)
        neg_triplets.append(entry)
    return neg_triplets
def get_neg_samples_replacing_head_tail_from_ruleN(ruleN_pred_path, entity2id, saved_relation2id):
    """Parse RuleN prediction output into negative-sample blocks.

    The file is consumed in groups of three lines: the test triplet, the
    ranked head candidates, and the ranked tail candidates (candidate names
    sit at the odd token positions). Triplets whose relation is unknown to
    saved_relation2id are skipped. The rank of the true entity is stored as
    the second element of each block (10000 when it never appears).
    """
    with open(ruleN_pred_path) as pred_file:
        lines = [ln.split() for ln in pred_file.read().split('\n')[:-1]]
    neg_triplets = []
    for triple_line, head_line, tail_line in zip(lines[0::3], lines[1::3], lines[2::3]):
        if triple_line[1] not in saved_relation2id:
            continue
        head = entity2id[triple_line[0]]
        rel = saved_relation2id[triple_line[1]]
        tail = entity2id[triple_line[2]]
        entry = {'head': [[], 10000], 'tail': [[], 10000]}
        head_cands, tail_cands = [], []
        for rank, name in enumerate(head_line[1::2]):
            head_cands.append([entity2id[name], tail, rel])
            if entity2id[name] == head:
                entry['head'][1] = rank
        for rank, name in enumerate(tail_line[1::2]):
            tail_cands.append([head, entity2id[name], rel])
            if entity2id[name] == tail:
                entry['tail'][1] = rank
        entry['head'][0] = np.array(head_cands)
        entry['tail'][0] = np.array(tail_cands)
        neg_triplets.append(entry)
    return neg_triplets
def incidence_matrix(adj_list):
    '''
    adj_list: List of sparse adjacency matrices (all the same shape).

    Returns their element-wise sum as a single csc matrix; entries that
    appear under several relations accumulate (csc construction sums
    duplicate (row, col) coordinates).
    '''
    shape = adj_list[0].shape
    row_idx, col_idx, values = [], [], []
    for adj in adj_list:
        coo = adj.tocoo()
        row_idx.extend(coo.row.tolist())
        col_idx.extend(coo.col.tolist())
        values.extend(coo.data.tolist())
    return ssp.csc_matrix((np.array(values), (np.array(row_idx), np.array(col_idx))), shape=shape)
def _bfs_relational(adj, roots, max_nodes_per_hop=None):
    """
    BFS for graphs with multiple edge types. Yields one level set per hop.

    Modified from dgl.contrib.data.knowledge_graph to accommodate sampling:
    when a level exceeds max_nodes_per_hop, a random subset of that size is
    kept before descending further.
    """
    visited = set()
    current_lvl = set(roots)
    while current_lvl:
        visited |= current_lvl
        next_lvl = _get_neighbors(adj, current_lvl)
        next_lvl -= visited  # set difference: never revisit a node
        if max_nodes_per_hop and max_nodes_per_hop < len(next_lvl):
            # BUGFIX: random.sample() no longer accepts sets (TypeError on
            # Python >= 3.11). Sorting first also makes the subset
            # reproducible under a fixed random seed.
            next_lvl = set(random.sample(sorted(next_lvl), max_nodes_per_hop))
        yield next_lvl
        # (the original `set.union(next_lvl)` was just an obscure copy)
        current_lvl = set(next_lvl)
def _get_neighbors(adj, nodes):
    """Takes a set of nodes and a graph adjacency matrix and returns a set of neighbors.
    Directly copied from dgl.contrib.data.knowledge_graph"""
    indicator = _sp_row_vec_from_idx_list(list(nodes), adj.shape[1])
    # One sparse vector-matrix product reaches all out-neighbors at once.
    reached = indicator.dot(adj)
    return set(ssp.find(reached)[1])  # column indices of the non-zeros
def _sp_row_vec_from_idx_list(idx_list, dim):
    """Create sparse vector of dimensionality dim from a list of indices."""
    count = len(idx_list)
    # A 1 x dim indicator row: ones at the given column indices, row 0 everywhere.
    return ssp.csr_matrix(
        (np.ones(count), (np.zeros(count), list(idx_list))),
        shape=(1, dim),
    )
def get_neighbor_nodes(roots, adj, h=1, max_nodes_per_hop=None):
    """Return the union of all nodes reached within h BFS hops of `roots`."""
    level_sets = []
    bfs = _bfs_relational(adj, roots, max_nodes_per_hop)
    for _ in range(h):
        try:
            level_sets.append(next(bfs))
        except StopIteration:
            break  # graph exhausted before h hops
    return set().union(*level_sets)
def subgraph_extraction_labeling(ind, rel, A_list, h=1, enclosing_sub_graph=False, max_nodes_per_hop=None, node_information=None, max_node_label_value=None):
    # extract the h-hop enclosing subgraphs around link 'ind'
    # ind: (head, tail) node pair of the target link.
    # NOTE(review): `rel` and `node_information` are accepted but unused here —
    # presumably kept for interface compatibility with callers; confirm.
    # Collapse all relations into one adjacency and symmetrize it so hops
    # ignore both relation type and edge direction.
    A_incidence = incidence_matrix(A_list)
    A_incidence += A_incidence.T
    # could pack these two into a function
    root1_nei = get_neighbor_nodes(set([ind[0]]), A_incidence, h, max_nodes_per_hop)
    root2_nei = get_neighbor_nodes(set([ind[1]]), A_incidence, h, max_nodes_per_hop)
    subgraph_nei_nodes_int = root1_nei.intersection(root2_nei)
    subgraph_nei_nodes_un = root1_nei.union(root2_nei)
    # Extract subgraph | Roots being in the front is essential for labelling and the model to work properly.
    if enclosing_sub_graph:
        # enclosing: only nodes on paths near BOTH endpoints
        subgraph_nodes = list(ind) + list(subgraph_nei_nodes_int)
    else:
        subgraph_nodes = list(ind) + list(subgraph_nei_nodes_un)
    # Slice every relation's adjacency down to the selected nodes.
    subgraph = [adj[subgraph_nodes, :][:, subgraph_nodes] for adj in A_list]
    labels, enclosing_subgraph_nodes = node_label_new(incidence_matrix(subgraph), max_distance=h)
    # Keep only nodes whose double-radius label stays within h.
    pruned_subgraph_nodes = np.array(subgraph_nodes)[enclosing_subgraph_nodes].tolist()
    pruned_labels = labels[enclosing_subgraph_nodes]
    if max_node_label_value is not None:
        # Cap labels element-wise, e.g. to match a trained model's embedding table.
        pruned_labels = np.array([np.minimum(label, max_node_label_value).tolist() for label in pruned_labels])
    return pruned_subgraph_nodes, pruned_labels
def remove_nodes(A_incidence, nodes):
    """Return a copy of the square sparse matrix with the given node indices
    dropped from both rows and columns."""
    kept = list(set(range(A_incidence.shape[1])) - set(nodes))
    return A_incidence[kept, :][:, kept]
def node_label_new(subgraph, max_distance=1):
    """Label subgraph nodes by their distances to the two roots (nodes 0 and 1).

    Returns (labels, enclosing_subgraph_nodes): labels is an (n, 2) int array
    of per-root distances (roots get the fixed labels [0, 1] and [1, 0]);
    enclosing_subgraph_nodes indexes the nodes whose larger distance is within
    max_distance.
    """
    # an implementation of the proposed double-radius node labeling (DRNL)
    roots = [0, 1]
    # Remove one root at a time so shortest paths cannot shortcut through the
    # other root. After removing node 0, index 0 of the reduced graph is the
    # other root (node 1), and vice versa — this relies on roots being 0 and 1.
    sgs_single_root = [remove_nodes(subgraph, [root]) for root in roots]
    # [:, 1:] drops the source's own column; unreachable nodes come back as inf
    # and are capped at 1e7 before the integer cast below.
    dist_to_roots = [np.clip(ssp.csgraph.dijkstra(sg, indices=[0], directed=False, unweighted=True, limit=1e6)[:, 1:], 0, 1e7) for r, sg in enumerate(sgs_single_root)]
    dist_to_roots = np.array(list(zip(dist_to_roots[0][0], dist_to_roots[1][0])), dtype=int)
    # dist_to_roots[np.abs(dist_to_roots) > 1e6] = 0
    # dist_to_roots = dist_to_roots + 1
    target_node_labels = np.array([[0, 1], [1, 0]])
    # dist_to_roots is empty when the subgraph has only the two roots.
    labels = np.concatenate((target_node_labels, dist_to_roots)) if dist_to_roots.size else target_node_labels
    enclosing_subgraph_nodes = np.where(np.max(labels, axis=1) <= max_distance)[0]
    # print(len(enclosing_subgraph_nodes))
    return labels, enclosing_subgraph_nodes
def ssp_multigraph_to_dgl(graph, n_feats=None):
    """
    Converting ssp multigraph (i.e. list of adjs) to dgl multigraph.

    Each adjacency matrix in `graph` contributes directed edges whose 'type'
    attribute is that relation's index. Optional n_feats becomes the node
    feature tensor.
    """
    g_nx = nx.MultiDiGraph()
    g_nx.add_nodes_from(list(range(graph[0].shape[0])))
    # Add edges, one relation at a time
    for rel, adj in enumerate(graph):
        # Convert adjacency matrix to (src, dst, attrs) tuples for networkx
        nx_triplets = []
        for src, dst in list(zip(adj.tocoo().row, adj.tocoo().col)):
            nx_triplets.append((src, dst, {'type': rel}))
        g_nx.add_edges_from(nx_triplets)
    # make dgl graph
    # NOTE(review): DGLGraph(multigraph=True) and g.from_networkx() are legacy
    # DGL (<0.5) APIs; newer DGL uses dgl.from_networkx() — confirm the pinned
    # dgl version before upgrading.
    g_dgl = dgl.DGLGraph(multigraph=True)
    g_dgl.from_networkx(g_nx, edge_attrs=['type'])
    # add node features
    if n_feats is not None:
        g_dgl.ndata['feat'] = torch.tensor(n_feats)
    return g_dgl
def prepare_features(subgraph, n_labels, max_n_label, n_feats=None):
# One hot encode the node label feature and concat to n_featsure
n_nodes = subgraph.number_of_nodes()
label_feats = np.zeros((n_nodes, max_n_label[0] + 1 + max_n_label[1] + 1))
label_feats[np.arange(n_nodes), n_labels[:, 0]] = 1
label_feats[np.arange(n_nodes), max_n_label[0] + 1 + n_labels[:, 1]] = 1
n_feats = | np.concatenate((label_feats, n_feats), axis=1) | numpy.concatenate |
from collections import OrderedDict
from typing import List, Tuple, Dict
import numpy as np
import torch
from .base_metric import BaseMetric
from .metric_keys import DetailMetricKey, MainMetricKey
def calc_overlap_union_iou(pred: np.ndarray or None, teacher: np.ndarray) -> Tuple[float, float, float]:
    """
    Compute intersection area, union area and IoU of two axis-aligned boxes
    given as (x_min, y_min, x_max, y_max).

    :param pred: ndarray (4, ) predicted box, or None when there is no prediction
    :param teacher: ndarray (4, ) ground-truth box
    :return: overlap, union, iou
    """
    teacher_area = (teacher[2] - teacher[0]) * (teacher[3] - teacher[1])
    if pred is None:
        # No prediction: the whole teacher box is missed.
        return 0.0, teacher_area, 0.0
    pred_area = (pred[2] - pred[0]) * (pred[3] - pred[1])
    # Clamp to 0 so disjoint boxes do not produce negative "intersections".
    intersection_width = np.maximum(np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0]), 0)
    intersection_height = np.maximum(np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1]), 0)
    overlap = intersection_width * intersection_height
    union = teacher_area + pred_area - overlap
    # Guard against both boxes being degenerate (zero area): previously
    # union == 0 produced a divide-by-zero warning / nan.
    iou = overlap / union if union > 0 else 0.0
    return overlap, union, iou
class DetectionIoUByClasses(BaseMetric):
    # Accumulates per-class overlap/union areas across batches and reports IoU
    # per class plus 'average' and 'total' aggregates.
    def __init__(self, label_names: List[str], val_key: DetailMetricKey = DetailMetricKey.KEY_AVERAGE):
        """
        :param label_names: class names; list index == class label id
        :param val_key: which aggregate calc_summary() returns as its primary value
        """
        assert val_key in [DetailMetricKey.KEY_TOTAL, DetailMetricKey.KEY_AVERAGE]
        self.label_names = label_names
        # Running union/overlap areas, indexed by class label.
        self.union_by_classes = [0 for _ in range(len(self.label_names))]
        self.overlap_by_classes = [0 for _ in range(len(self.label_names))]
        self._val_key = val_key
    def clone_empty(self) -> 'DetectionIoUByClasses':
        """Return a new instance with the same configuration but zeroed counters."""
        return DetectionIoUByClasses(self.label_names.copy(), self._val_key)
    def clone(self) -> 'DetectionIoUByClasses':
        """Return a copy including the accumulated counters."""
        new_metric = self.clone_empty()
        new_metric.union_by_classes = self.union_by_classes.copy()
        new_metric.overlap_by_classes = self.overlap_by_classes.copy()
        return new_metric
    def calc_one_batch(self, pred: np.ndarray or torch.Tensor, teacher: np.ndarray or torch.Tensor):
        """
        :param pred: (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
        :param teacher: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
        :return:
        """
        # Group predictions by class index up front; a full scan per annotation
        # would be slow. (Same idea should be applied in RecallAndPrecision.)
        batch_pred_by_class = []
        for pred_bboxes in pred:
            pred_bboxes_by_class = [[] for _ in range(len(self.label_names))]
            for pred_bbox in pred_bboxes:
                pred_bboxes_by_class[int(pred_bbox[-1])].append(pred_bbox)
            batch_pred_by_class.append(pred_bboxes_by_class)
        for i in range(teacher.shape[0]):
            bbox_annotations = teacher[i, :, :]
            # Padding annotations carry a negative label and are ignored.
            bbox_annotations = bbox_annotations[bbox_annotations[:, -1] >= 0]
            pred_bboxes_by_class = batch_pred_by_class[i]
            for bbox_annotation in bbox_annotations:
                label = int(bbox_annotation[-1])
                # NOTE(review): pred[i] is None is checked here, but the grouping
                # loop above already iterated pred[i]; confirm a None entry can
                # actually reach this point without raising earlier.
                if pred[i] is None:
                    overlap, union, _ = calc_overlap_union_iou(None, bbox_annotation)
                    self.union_by_classes[label] += union
                    self.overlap_by_classes[label] += overlap
                    continue
                # Search for the predicted bbox that best fits this ground-truth bbox.
                max_iou = 0
                best_union, best_overlap = 0, 0
                for pred_bbox in pred_bboxes_by_class[label]:
                    overlap, union, iou = calc_overlap_union_iou(pred_bbox, bbox_annotation)
                    if max_iou < iou:
                        max_iou = iou
                        best_union, best_overlap = union, overlap
                if max_iou <= 0:
                    # No overlapping prediction: the whole teacher area counts as union.
                    overlap, union, _ = calc_overlap_union_iou(None, bbox_annotation)
                    self.union_by_classes[label] += union
                    self.overlap_by_classes[label] += overlap
                    continue
                self.union_by_classes[label] += best_union
                self.overlap_by_classes[label] += best_overlap
    def calc_summary(self) -> Tuple[float, Dict[str, float]]:
        """Return (primary value, dict of per-class IoU plus 'average'/'total')."""
        result = OrderedDict()
        total_overlap, total_union = 0, 0
        avg_iou = 0.0
        for i, label_name in enumerate(self.label_names):
            overlap, union = self.overlap_by_classes[i], self.union_by_classes[i]
            result[label_name] = overlap / union if union > 0 else 0
            total_overlap += overlap
            total_union += union
            avg_iou += result[label_name]
        result[DetailMetricKey.KEY_AVERAGE.value] = avg_iou / len(self.label_names)
        result[DetailMetricKey.KEY_TOTAL.value] = total_overlap / total_union if total_union > 0 else 0
        return result[self._val_key.value], result
    def clear(self):
        # Reset the accumulated counters.
        self.union_by_classes = [0 for i in range(len(self.label_names))]
        self.overlap_by_classes = [0 for i in range(len(self.label_names))]
    def __add__(self, other: 'DetectionIoUByClasses') -> 'DetectionIoUByClasses':
        """Merge two metric instances by summing their accumulated areas."""
        if not isinstance(other, DetectionIoUByClasses):
            raise RuntimeError(f"Bad class type. expected: {DetectionIoUByClasses.__name__}")
        if len(self.label_names) != len(other.label_names):
            raise RuntimeError(
                f"Label count must be same. but self is {len(self.label_names)} and other is {len(other.label_names)}")
        new_metric = self.clone_empty()
        for i in range(len(self.union_by_classes)):
            new_metric.union_by_classes[i] = self.union_by_classes[i] + other.union_by_classes[i]
            new_metric.overlap_by_classes[i] = self.overlap_by_classes[i] + other.overlap_by_classes[i]
        return new_metric
    def __truediv__(self, num: int) -> 'DetectionIoUByClasses':
        # IoU is a ratio of accumulated areas, so dividing by a batch count is
        # intentionally a no-op; an unchanged copy is returned.
        return self.clone()
class RecallAndPrecision(BaseMetric):
    """Accumulates per-class TP/FP/FN counts (a prediction matches an
    annotation of the same class at IoU >= 0.5) and reports recall, precision
    and F-score per class plus 'average'/'total' aggregates."""
    def __init__(self, label_names: List[str], main_val_key: MainMetricKey = MainMetricKey.KEY_F_SCORE,
                 sub_val_key: DetailMetricKey = DetailMetricKey.KEY_TOTAL):
        """
        :param label_names: class names; list index == class label id
        :param main_val_key: which of recall/precision/f-score calc_summary() returns
        :param sub_val_key: which aggregate ('average' or 'total') it returns
        """
        self.label_names = label_names
        self.tp_by_classes = [0 for _ in range(len(self.label_names))]
        self.fp_by_classes = [0 for _ in range(len(self.label_names))]
        self.fn_by_classes = [0 for _ in range(len(self.label_names))]
        self._main_val_key = main_val_key
        self._sub_val_key = sub_val_key
        assert self._main_val_key in [MainMetricKey.KEY_RECALL, MainMetricKey.KEY_PRECISION,
                                      MainMetricKey.KEY_F_SCORE]
        assert self._sub_val_key in [DetailMetricKey.KEY_AVERAGE, DetailMetricKey.KEY_TOTAL]
    def clone_empty(self) -> 'RecallAndPrecision':
        """Return a new instance with the same configuration but zeroed counters."""
        return RecallAndPrecision(self.label_names.copy(), self._main_val_key, self._sub_val_key)
    def clone(self) -> 'RecallAndPrecision':
        """Return a copy including the accumulated counters."""
        new_metric = self.clone_empty()
        new_metric.tp_by_classes = self.tp_by_classes.copy()
        new_metric.fp_by_classes = self.fp_by_classes.copy()
        new_metric.fn_by_classes = self.fn_by_classes.copy()
        return new_metric
    def calc_one_batch(self, pred: np.ndarray or torch.Tensor, teacher: np.ndarray or torch.Tensor):
        """
        :param pred: (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
        :param teacher: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
        :return:
        """
        for i in range(teacher.shape[0]):
            bbox_annotations = teacher[i, :, :]
            # Padding annotations carry a negative label and are ignored.
            bbox_annotations = bbox_annotations[bbox_annotations[:, 4] >= 0]
            # Guard: an image may have no predictions at all (previously
            # pred[i].copy() raised before the None check could fire).
            pred_bboxes = pred[i].copy() if pred[i] is not None else None
            searched_index_ls = []
            # Group prediction indices by class up front; a full scan per
            # annotation would be slow.
            idx_ls_by_classes = [[] for _ in range(len(self.label_names))]
            if pred_bboxes is not None:
                for j, pred_bbox in enumerate(pred_bboxes):
                    idx_ls_by_classes[int(pred_bbox[-1])].append(j)
            for bbox_annotation in bbox_annotations:
                label = int(bbox_annotation[-1])
                if pred_bboxes is None or len(pred_bboxes) == 0:
                    self.fn_by_classes[label] += 1
                    continue
                # Search for a same-class, not-yet-matched prediction that fits
                # this ground-truth bbox.
                is_matched = False
                for pred_bbox_idx in idx_ls_by_classes[label]:
                    if pred_bbox_idx in searched_index_ls:
                        continue
                    pred_bbox = pred_bboxes[pred_bbox_idx]
                    overlap, union, iou = calc_overlap_union_iou(pred_bbox, bbox_annotation)
                    if iou >= 0.5:
                        self.tp_by_classes[label] += 1
                        is_matched = True
                        # BUGFIX: record the index of the matched *prediction*.
                        # Previously this appended the batch index `i`, which
                        # both failed to exclude matched predictions from the
                        # FP count below and wrongly excluded unmatched ones.
                        searched_index_ls.append(pred_bbox_idx)
                        break
                if not is_matched:
                    self.fn_by_classes[label] += 1
            if pred_bboxes is None:
                continue
            # Every prediction left unmatched is a false positive.
            for j, pred_bbox in enumerate(pred_bboxes):
                if j in searched_index_ls:
                    continue
                self.fp_by_classes[int(pred_bbox[-1])] += 1
    def calc_summary(self) -> Tuple[float, Dict[str, float]]:
        """Return (primary value, {'recall'|'precision'|'f_score': per-class dicts})."""
        recall_dict, precision_dict, f_score_dict = OrderedDict(), OrderedDict(), OrderedDict()
        for label, label_name in enumerate(self.label_names):
            tp, fp, fn = self.tp_by_classes[label], self.fp_by_classes[label], self.fn_by_classes[label]
            recall = tp / (tp + fn) if tp + fn > 0 else 0
            precision = tp / (tp + fp) if tp + fp > 0 else 0
            f_score = (2 * recall * precision) / (recall + precision) if recall + precision > 0 else 0
            recall_dict[label_name] = recall
            precision_dict[label_name] = precision
            f_score_dict[label_name] = f_score
        # Macro average over classes.
        recall_dict[DetailMetricKey.KEY_AVERAGE.value] = sum(recall_dict.values()) / len(self.label_names)
        precision_dict[DetailMetricKey.KEY_AVERAGE.value] = sum(precision_dict.values()) / len(self.label_names)
        f_score_dict[DetailMetricKey.KEY_AVERAGE.value] = sum(f_score_dict.values()) / len(self.label_names)
        # Micro ('total') values from the pooled counts.
        total_tp, total_fn, total_fp = sum(self.tp_by_classes), sum(self.fn_by_classes), sum(self.fp_by_classes)
        total_recall = total_tp / (total_tp + total_fn) if total_tp + total_fn > 0 else 0
        total_precision = total_tp / (total_tp + total_fp) if total_tp + total_fp > 0 else 0
        total_f_score = (2 * total_recall * total_precision) / (
                total_recall + total_precision) if total_recall + total_precision > 0 else 0
        recall_dict[DetailMetricKey.KEY_TOTAL.value] = total_recall
        precision_dict[DetailMetricKey.KEY_TOTAL.value] = total_precision
        f_score_dict[DetailMetricKey.KEY_TOTAL.value] = total_f_score
        result = {
            MainMetricKey.KEY_RECALL.value: recall_dict,
            MainMetricKey.KEY_PRECISION.value: precision_dict,
            MainMetricKey.KEY_F_SCORE.value: f_score_dict
        }
        return result[self._main_val_key.value][self._sub_val_key.value], result
    def clear(self):
        # Reset the accumulated counters.
        self.tp_by_classes = [0 for i in range(len(self.label_names))]
        self.fp_by_classes = [0 for i in range(len(self.label_names))]
        self.fn_by_classes = [0 for i in range(len(self.label_names))]
    def __add__(self, other: 'RecallAndPrecision') -> 'RecallAndPrecision':
        """Merge two metric instances by summing their TP/FP/FN counts."""
        if not isinstance(other, RecallAndPrecision):
            raise RuntimeError(f"Bad class type. expected: {RecallAndPrecision.__name__}")
        if len(self.label_names) != len(other.label_names):
            raise RuntimeError(
                f"Label count must be same. but self is {len(self.label_names)} and other is {len(other.label_names)}")
        new_metric = self.clone_empty()
        for i in range(len(self.tp_by_classes)):
            new_metric.tp_by_classes[i] = self.tp_by_classes[i] + other.tp_by_classes[i]
            new_metric.fp_by_classes[i] = self.fp_by_classes[i] + other.fp_by_classes[i]
            new_metric.fn_by_classes[i] = self.fn_by_classes[i] + other.fn_by_classes[i]
        return new_metric
    def __truediv__(self, num: int) -> 'RecallAndPrecision':
        # Ratios are derived from accumulated counts, so dividing by a batch
        # count is intentionally a no-op; an unchanged copy is returned.
        return self.clone()
# TODO 実装途中
class mAPByClasses:
def __init__(self, n_classes: int):
self._n_classes = n_classes
def __call__(self, results, teachers):
average_precisions = [_ for _ in range(self._n_classes)]
for label in range(self._n_classes):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = | np.zeros((0,)) | numpy.zeros |
"""
Holds some code for analyzing the faces_basic dataset.
Eventually much of this code should be broken out to functions that are common across datasets,
then this file should hold only study-specific information.
The working directory must be ../../.. relative to this file.
Notes:
https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004660
0.15 - 200 Hz 1-pole filter
1000 Hz srate
Paper used CAR after rejecting artifacts or epileptiform activity.
58-62 Hz 3rd order Butterworth filter.
400 msec stimulus on (face or house), 400 msec ISI.
50 house and 50 face pictures per run.
Further methods from https://www.sciencedirect.com/science/article/pii/S105381191300935X
Spectral decoupling:
1-sec window centred in the middle of the stimulus.
PSD (Hann -> Fourier -> * complex conjugate)
Normalize w.r.t. mean spectrum across all segments ( psd / mean(psd) )
log(psd)
PCA to get projections from PSD to PSCs (only on freqs < 200 Hz that are not around 60Hz or its harmonics)
Online:
Spectrogram (wavelets), project each time point onto first PSC (broadband)
Smoothing (sigma = 0.05 sec)
z-scoring
exp()
Here we will take a slightly different approach:
PSD -> TensorDecomposition (trials, frequencies, channels)
Raw -> TensorDecomposition (trials, times, channels)
(? DemixingPCA ?)
@author: <NAME>
"""
from pathlib import Path
import numpy as np
# Root of the downloaded KJM faces/houses ECoG dataset (relative to the CWD).
DATA_ROOT = Path.cwd() / 'data' / 'kjm_ecog' / 'download' / 'faces_basic'
# Anatomical area names, indexed by (elcode - 1) from each subject's locs file.
# The ' ' entries are placeholder codes with no assigned area name.
AREA_LABELS = [
    'Temporal pole',
    'Parahippocampal gyrus',  # parahippocampal part of the medial occipito-temporal gyrus
    'Inferior temporal gyrus',
    'Middle temporal gyrus',
    'fusiform gyrus',  # Lateral occipito-temporal gyrus,
    'Lingual gyrus',  # lingual part of the medial occipito-temporal gyrus
    'Inferior occipital gyrus',
    'Cuneus',
    'Post-ventral cingulate gyrus',  # Posterior-ventral part of the
    'Middle Occipital gyrus',
    'occipital pole',
    'precuneus',
    'Superior occipital gyrus',
    'Post-dorsal cingulate gyrus',  # Posterior-dorsal part of the cingulate gyrus
    ' ',
    ' ',
    ' ',
    ' ',
    ' ',
    'Non-included area',
]
def import_to_npype(subject_id):
import scipy.io
from collections import OrderedDict
from neuropype.engine import InstanceAxis, SpaceAxis, TimeAxis, Chunk, Block, Packet, Flags
data_fn = DATA_ROOT / 'data' / subject_id / (subject_id + '_faceshouses.mat')
dat_contents = scipy.io.loadmat(data_fn)
stim = dat_contents['stim'].reshape(-1) # samples x 1; uint8
data = dat_contents['data'] # samples x channels; float
srate = dat_contents['srate'][0][0]
# Time vector
tvec = np.arange(len(stim)) / srate
# Process the stimulus to get an events chunk
b_stim_onset = np.diff(np.hstack((0, stim))) != 0
b_stim_onset = np.logical_and(b_stim_onset, stim != 0)
stim_inds = np.where(b_stim_onset)[0]
stim_vals = stim[stim_inds]
stim_content = np.repeat(['ISI'], len(stim_vals)).astype(object)
stim_content[stim_vals <= 50] = 'house'
stim_content[np.logical_and(stim_vals > 50, stim_vals <= 100)] = 'face'
stim_ax = InstanceAxis(tvec[b_stim_onset], data=stim_content.tolist())
stim_ax.append_fields(['StimID'], [stim_vals])
stim_chunk = Chunk(block=Block(data=np.nan * np.ones(stim_ax.data.shape), axes=(stim_ax,)),
props=[Flags.is_event_stream])
# Get the channel labels and locations.
locs_fn = DATA_ROOT / 'locs' / (subject_id + '_xslocs.mat')
locs_contents = scipy.io.loadmat(locs_fn) # 'elcode' and 'locs'
elec_names = np.array([AREA_LABELS[el_code - 1] for el_code in locs_contents['elcode'].reshape(-1)], dtype=object)
# Append a .N to each electrode name, where N is the count of electrodes with that name.
# The below method is a little silly, but more straightforward approaches did not work in interactive debug mode.
name_counts = {_: 0 for _ in | np.unique(elec_names) | numpy.unique |
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import constant_op
import sys
import json
import math
import os
import time
import random
import sqlite3
random.seed(time.time())
from model import Model, _START_VOCAB
# Command-line flags (TF1-style). Typos in two user-facing help strings were
# fixed: "entitiy" -> "entity", "default isscreen" -> "default is screen".
tf.app.flags.DEFINE_boolean("is_train", True, "Set to False to inference.")
tf.app.flags.DEFINE_integer("symbols", 30000, "vocabulary size.")
tf.app.flags.DEFINE_integer("num_entities", 21471, "entity vocabulary size.")
tf.app.flags.DEFINE_integer("num_relations", 44, "relation size.")
tf.app.flags.DEFINE_integer("embed_units", 300, "Size of word embedding.")
tf.app.flags.DEFINE_integer("trans_units", 100, "Size of trans embedding.")
tf.app.flags.DEFINE_integer("units", 512, "Size of each model layer.")
tf.app.flags.DEFINE_integer("layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_boolean("copy_use", True, "use copy mechanism or not.")
tf.app.flags.DEFINE_integer("batch_size", 100, "Batch size to use during training.")
tf.app.flags.DEFINE_string("data_dir", "./data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./train", "Training directory.")
tf.app.flags.DEFINE_integer("per_checkpoint", 1000, "How many steps to do per checkpoint.")
tf.app.flags.DEFINE_integer("inference_version", 0, "The version for inferencing.")
tf.app.flags.DEFINE_boolean("log_parameters", True, "Set to True to show the parameters")
tf.app.flags.DEFINE_string("inference_path", "test", "Set filename of inference, default is screen")
FLAGS = tf.app.flags.FLAGS
# Normalize train_dir by stripping a single trailing slash.
if FLAGS.train_dir[-1] == '/': FLAGS.train_dir = FLAGS.train_dir[:-1]
# Module-level caches for the commonsense KB resources; filled by prepare_data().
csk_triples, csk_entities, kb_dict = [], [], []
def prepare_data(path, is_train=True):
    """Load the resource dictionary and the train/valid/test splits from `path`.

    Side effect: fills the module-level csk_triples / csk_entities / kb_dict
    caches from resource.txt. When is_train is False the (large) training
    split is skipped and returned empty.
    """
    global csk_entities, csk_triples, kb_dict
    with open('%s/resource.txt' % path) as f:
        resource = json.loads(f.readline())
    csk_triples = resource['csk_triples']
    csk_entities = resource['csk_entities']
    raw_vocab = resource['vocab_dict']
    kb_dict = resource['dict_csk']
    data_train, data_dev, data_test = [], [], []
    if is_train:
        with open('%s/trainset.txt' % path) as f:
            for idx, line in enumerate(f):
                if idx % 100000 == 0:
                    print('read train file line %d' % idx)
                data_train.append(json.loads(line))
    with open('%s/validset.txt' % path) as f:
        data_dev = [json.loads(line) for line in f]
    with open('%s/testset.txt' % path) as f:
        data_test = [json.loads(line) for line in f]
    return raw_vocab, data_train, data_dev, data_test
def build_vocab(path, raw_vocab, trans='transE'):
print("Creating word vocabulary...")
vocab_list = _START_VOCAB + sorted(raw_vocab, key=raw_vocab.get, reverse=True)
if len(vocab_list) > FLAGS.symbols:
vocab_list = vocab_list[:FLAGS.symbols]
print("Creating entity vocabulary...")
entity_list = ['_NONE', '_PAD_H', '_PAD_R', '_PAD_T', '_NAF_H', '_NAF_R', '_NAF_T']
with open('%s/entity.txt' % path) as f:
for i, line in enumerate(f):
e = line.strip()
entity_list.append(e)
print("Creating relation vocabulary...")
relation_list = []
with open('%s/relation.txt' % path) as f:
for i, line in enumerate(f):
r = line.strip()
relation_list.append(r)
print("Loading word vectors...")
vectors = {}
with open('%s/glove.840B.300d.txt' % path) as f:
for i, line in enumerate(f):
if i % 100000 == 0:
print(" processing line %d" % i)
s = line.strip()
word = s[:s.find(' ')]
vector = s[s.find(' ')+1:]
vectors[word] = vector
embed = []
for word in vocab_list:
if word in vectors:
vector = map(float, vectors[word].split())
else:
vector = np.zeros((FLAGS.embed_units), dtype=np.float32)
embed.append(vector)
embed = np.array(embed, dtype=np.float32)
print("Loading entity vectors...")
entity_embed = []
with open('%s/entity_%s.txt' % (path, trans)) as f:
for i, line in enumerate(f):
s = line.strip().split('\t')
entity_embed.append(map(float, s))
print("Loading relation vectors...")
relation_embed = []
with open('%s/relation_%s.txt' % (path, trans)) as f:
for i, line in enumerate(f):
s = line.strip().split('\t')
relation_embed.append(s)
entity_relation_embed = np.array(entity_embed+relation_embed, dtype=np.float32)
entity_embed = np.array(entity_embed, dtype=np.float32)
relation_embed = | np.array(relation_embed, dtype=np.float32) | numpy.array |
# encoding: utf-8
import numpy as np
import os
import torch
from PIL import Image
import matplotlib.pyplot as plt
from collections import defaultdict
import gc
from utils.AlignedTripletLoss import low_memory_local_dist
from trainers.re_ranking import re_ranking as re_ranking_func
class ResNetEvaluator:
    def __init__(self, model):
        # model: trained re-ID network; must provide eval() and a
        # feature-extracting forward (used via _forward in evaluate()).
        self.model = model
def save_incorrect_pairs(self, distmat, queryloader, galleryloader,
g_pids, q_pids, g_camids, q_camids, savefig):
os.makedirs(savefig, exist_ok=True)
self.model.eval()
m = distmat.shape[0]
indices = np.argsort(distmat, axis=1)
for i in range(m):
for j in range(10):
index = indices[i][j]
if g_camids[index] == q_camids[i] and g_pids[index] == q_pids[i]:
continue
else:
break
if g_pids[index] == q_pids[i]:
continue
fig, axes =plt.subplots(1, 11, figsize=(12, 8))
img = queryloader.dataset.dataset[i][0]
img = Image.open(img).convert('RGB')
axes[0].set_title(q_pids[i])
axes[0].imshow(img)
axes[0].set_axis_off()
for j in range(10):
gallery_index = indices[i][j]
img = galleryloader.dataset.dataset[gallery_index][0]
img = Image.open(img).convert('RGB')
axes[j+1].set_title(g_pids[gallery_index])
axes[j+1].set_axis_off()
axes[j+1].imshow(img)
fig.savefig(os.path.join(savefig, '%d.png' %q_pids[i]))
plt.close(fig)
def evaluate(self, queryloader, galleryloader, queryFliploader, galleryFliploader,
ranks=[1, 2, 4, 5,8, 10, 16, 20], eval_flip=False, re_ranking=False, savefig=False):
self.model.eval()
qf, q_pids, q_camids = [], [], []
for inputs0, inputs1 in zip(queryloader, queryFliploader):
inputs, pids, camids = self._parse_data(inputs0)
feature0 = self._forward(inputs)
if eval_flip:
inputs, pids, camids = self._parse_data(inputs1)
feature1 = self._forward(inputs)
qf.append((feature0 + feature1) / 2.0)
else:
qf.append(feature0)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = torch.Tensor(q_pids)
q_camids = torch.Tensor(q_camids)
print("Extracted features for query set: {} x {}".format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
for inputs0, inputs1 in zip(galleryloader, galleryFliploader):
inputs, pids, camids = self._parse_data(inputs0)
feature0 = self._forward(inputs)
if eval_flip:
inputs, pids, camids = self._parse_data(inputs1)
feature1 = self._forward(inputs)
gf.append((feature0 + feature1) / 2.0)
else:
gf.append(feature0)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = torch.Tensor(g_pids)
g_camids = torch.Tensor(g_camids)
print("Extracted features for gallery set: {} x {}".format(gf.size(0), gf.size(1)))
print("Computing distance matrix")
m, n = qf.size(0), gf.size(0)
q_g_dist = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
q_g_dist.addmm_(1, -2, qf, gf.t())
if re_ranking:
q_q_dist = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
q_q_dist.addmm_(1, -2, qf, qf.t())
g_g_dist = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
g_g_dist.addmm_(1, -2, gf, gf.t())
q_g_dist = q_g_dist.numpy()
q_g_dist[q_g_dist < 0] = 0
q_g_dist = np.sqrt(q_g_dist)
q_q_dist = q_q_dist.numpy()
q_q_dist[q_q_dist < 0] = 0
q_q_dist = np.sqrt(q_q_dist)
g_g_dist = g_g_dist.numpy()
g_g_dist[g_g_dist < 0] = 0
g_g_dist = np.sqrt(g_g_dist)
distmat = torch.Tensor(re_ranking_func(q_g_dist, q_q_dist, g_g_dist))
else:
distmat = q_g_dist
if savefig:
print("Saving fingure")
self.save_incorrect_pairs(distmat.numpy(), queryloader, galleryloader,
g_pids.numpy(), q_pids.numpy(), g_camids.numpy(), q_camids.numpy(), savefig)
print("Computing CMC and mAP")
cmc, mAP = self.eval_func_gpu(distmat, q_pids, g_pids, q_camids, g_camids)
print("Results ----------")
print("mAP: {:.1%}".format(mAP))
print("CMC curve")
for r in ranks:
print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
print("------------------")
del(qf)
del(gf)
try:
gc.collect()
except:
print('cannot collect.....')
return cmc[0]
def _parse_data(self, inputs):
imgs, pids, camids = inputs
return imgs.cuda(), pids, camids
def _forward(self, inputs):
with torch.no_grad():
feature = self.model(inputs)
return feature.cpu()
def eval_func_gpu(self, distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50):
num_q, num_g = distmat.size()
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
_, indices = torch.sort(distmat, dim=1)
matches = g_pids[indices] == q_pids.view([num_q, -1])
keep = ~((g_pids[indices] == q_pids.view([num_q, -1])) & (g_camids[indices] == q_camids.view([num_q, -1])))
#keep = g_camids[indices] != q_camids.view([num_q, -1])
results = []
num_rel = []
for i in range(num_q):
m = matches[i][keep[i]]
if m.any():
num_rel.append(m.sum())
results.append(m[:max_rank].unsqueeze(0))
matches = torch.cat(results, dim=0).float()
num_rel = torch.Tensor(num_rel)
cmc = matches.cumsum(dim=1)
cmc[cmc > 1] = 1
all_cmc = cmc.sum(dim=0) / cmc.size(0)
pos = torch.Tensor(range(1, max_rank+1))
temp_cmc = matches.cumsum(dim=1) / pos * matches
AP = temp_cmc.sum(dim=1) / num_rel
mAP = AP.sum() / AP.size(0)
return all_cmc.numpy(), mAP.item()
def eval_func(self, distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50):
"""Evaluation with market1501 metric
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
indices = np.argsort(distmat, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_AP = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query pid and camid
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same pid and camid with query
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
orig_cmc = matches[q_idx][keep]
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
return all_cmc, mAP
class AlignedEvaluator:
    def __init__(self, model):
        # Model under evaluation (feature extractor); stored for later use.
        self.model = model
def eval_cuhk03(self, distmat, q_pids, g_pids, q_camids, g_camids, max_rank, N=100):
"""Evaluation with cuhk03 metric
Key: one image for each gallery identity is randomly sampled for each query identity.
Random sampling is performed N times (default: N=100).
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
indices = np.argsort(distmat, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_AP = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query pid and camid
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same pid and camid with query
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
orig_cmc = matches[q_idx][keep] # binary vector, positions with value 1 are correct matches
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
kept_g_pids = g_pids[order][keep]
g_pids_dict = defaultdict(list)
for idx, pid in enumerate(kept_g_pids):
g_pids_dict[pid].append(idx)
cmc, AP = 0., 0.
for repeat_idx in range(N):
mask = np.zeros(len(orig_cmc), dtype=np.bool)
for _, idxs in g_pids_dict.items():
# randomly sample one image for each gallery person
rnd_idx = np.random.choice(idxs)
mask[rnd_idx] = True
masked_orig_cmc = orig_cmc[mask]
_cmc = masked_orig_cmc.cumsum()
_cmc[_cmc > 1] = 1
cmc += _cmc[:max_rank].astype(np.float32)
# compute AP
num_rel = masked_orig_cmc.sum()
tmp_cmc = masked_orig_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * masked_orig_cmc
AP += tmp_cmc.sum() / num_rel
cmc /= N
AP /= N
all_cmc.append(cmc)
all_AP.append(AP)
num_valid_q += 1.
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
return all_cmc, mAP
def eval_market1501(self, distmat, q_pids, g_pids, q_camids, g_camids, max_rank):
"""Evaluation with market1501 metric
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
indices = np.argsort(distmat, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_AP = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query pid and camid
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same pid and camid with query
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
orig_cmc = matches[q_idx][keep] # binary vector, positions with value 1 are correct matches
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = | np.asarray(all_cmc) | numpy.asarray |
import numpy as np
# 2013-10-31 Added MultiRate class, simplified fitting methods, removed full_output parameter
# 2014-12-18 Add loading of Frequency, Integration time and Iterations, calculate lower
# bound on errors from Poisson distribution
# 2015-01-28 Simplified fitting again. Needs more work
# 2015-03-02 Added functions for number density calculation
_verbosity = 2
def set_verbosity(level):
    """Set the module-wide verbosity threshold used by warn().

    Levels:
        0: serious/unrecoverable error
        1: recoverable error
        2: warning
        3: information
    """
    global _verbosity
    _verbosity = level
def warn(message, level):
    """Print *message* only when the global verbosity admits *level*."""
    if level > _verbosity:
        return
    print(message)
def fitter(p0, errfunc, args):
    """Run a least-squares fit via lmfit and report a goodness-of-fit p-value.

    Parameters
    ----------
    p0 : lmfit.Parameters (or compatible)
        Initial parameter guess.
    errfunc : callable
        Residual function handed to lmfit.minimize.
    args : tuple
        Extra positional arguments forwarded to errfunc.

    Returns
    -------
    (params, pval, result)
        Fitted parameters, p-value ``1 - chi2.cdf(chisqr, nfree)`` and the
        full lmfit MinimizerResult.

    Raises
    ------
    RuntimeError
        If the minimizer does not report success.
    """
    from lmfit import minimize
    result = minimize(errfunc, p0, args=args, nan_policy="omit")
    if not result.success:
        raise RuntimeError(" Optimal parameters not found: " + result.message)
    # A parameter still equal to its initial value was never moved by the
    # optimizer and is probably redundant -- warn about it.
    for i, name in enumerate(result.var_names):
        if result.params[name].value == result.init_vals[i]:
            warn("Warning: fitter: parameter \"%s\" was not changed, it is probably redundant" % name, 2)
    from scipy.stats import chi2
    chi = chi2.cdf(result.chisqr, result.nfree)
    # NOTE(review): the original computed a two-sided p-value in an if/else
    # branch and then unconditionally overwrote it with 1-chi, so the branch
    # was dead code. It has been removed; the effective behavior
    # (pval = 1 - chi, i.e. a one-sided upper-tail p-value) is unchanged.
    pval = 1 - chi
    return result.params, pval, result
def dict2Params(dic):
    """Convert a plain ``{name: value}`` dict into an lmfit Parameters object.

    An input that is already a Parameters instance is returned as a copy.
    """
    from lmfit import Parameters
    if isinstance(dic, Parameters):
        return dic.copy()
    params = Parameters()
    for name, value in dic.items():
        params.add(name, value=value)
    return params
P = dict2Params
class Rate:
def __init__(self, fname, full_data=False, skip_iter=[]):
import re
import datetime as dt
fr = open(fname)
state = -1
npoints = 0
nions = 0
pointno = 0
iterno = 0
ioniter = []
ionname = []
frequency = 0
integration = 0
poisson_error = True
# -1 header
# 0 init
# 1 read time
# 2 read data
for lineno, line in enumerate(fr):
# read header
if state == -1:
if lineno == 2:
T1 = line[:22].split()
T2 = line[22:].split()
self.starttime = dt.datetime.strptime(" ".join(T1), "%Y-%m-%d %H:%M:%S.%f")
self.stoptime = dt.datetime.strptime(" ".join(T2), "%Y-%m-%d %H:%M:%S.%f")
if lineno == 3:
state = 0
toks = line.split()
if len(toks) == 0:
continue
if state == 0:
if re.search("Period \(s\)=", line):
frequency = 1/float(re.search("Period \(s\)=([0-9.]+)", line).group(1))
if re.search("Frequency=", line):
frequency = float(re.search("Frequency=([0-9.]+)", line).group(1))
if re.search("Integration time \(s\)", line):
integration = float(re.search("Integration time \(s\)=([0-9.]+)", line).group(1))
if re.search("Number of Points=", line):
npoints = int(re.search("Number of Points=(\d+)", line).group(1))
if re.search("Number of Iterations=", line):
self.niter = int(re.search("Number of Iterations=(\d+)", line).group(1))
if toks[0] == "[Ion":
nions += 1
if re.search("^Iterations=", line) :
ioniter.append(int(re.search("Iterations=(\d+)", line).group(1)))
if re.search("^Name=", line) :
ionname.append(re.search("Name=(.+)$", line).group(1).strip('\"'))
if toks[0] == "Time":
if len(toks)-2 != nions:
print("Corrupt file", fname, "Wrong number of ions in the header. Trying to recover")
# Assume that the Time header is correct:
nions = len(toks)-2
ioniter = ioniter[:nions]
if len(ioniter) < nions:
warn("Corrupt file " + str(fname) + ": Iterations for all species not recorded, guessing...", 1)
while len(ioniter) < nions:
ioniter.append(ioniter[-1])
if len(ionname) < nions:
warn("Corrupt file " + str(fname) + ": Names for all species not recorded, making something up...", 2)
ionname += toks[len(ionname)+2:]
state = 1
time = []
data = np.zeros((nions, npoints, self.niter))
continue
if state == 1:
try:
newtime = float(toks[0])
except ValueError:
if pointno != npoints:
warn("Corrupt file " + fname + " trying to guess number of points", 2)
npoints = pointno
data.resize((nions, npoints, self.niter))
time = np.array(time)
state = 2
else:
time.append(newtime)
pointno += 1
if state == 2:
if toks[0] == "Iteration":
iterno = int(toks[1])-1
if iterno+1 > self.niter:
warn("Corrupt file " + fname + " trying to guess number of iterations", 2)
#msg = "Corrupt file: " + fname
#raise IOError(msg)
self.niter = iterno+1
data.resize((nions, npoints, self.niter))
pointno = 0
continue
try:
data[:, pointno, iterno] = [float(x) for x in toks][1:-1]
except ValueError:
warn("Error in file " + fname + " number of ions probably wrong")
pointno += 1
ioniter = np.array(ioniter)
# in case of multiple measurements per iteration
if iterno+1 != self.niter:
if self.niter % (iterno+1) != 0:
msg = "Corrupt file: " + fname
print(("Corrupt file " + fname + " trying to guess number of iterations:" + str(iterno+1)))
if iterno+1 < self.niter:
data = data[:,:,:iterno+1]
else:
newdata = | np.zeros((nions, npoints, iterno+1)) | numpy.zeros |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polynomial_tensor.py."""
from __future__ import absolute_import, division
import unittest
import copy
import numpy
from openfermion.ops import PolynomialTensor
from openfermion.transforms import get_fermion_operator
from openfermion.utils._slater_determinants_test import (
random_quadratic_hamiltonian)
class PolynomialTensorTest(unittest.TestCase):
    def setUp(self):
        """Build the PolynomialTensor fixtures shared by all tests.

        Naming scheme: ``_a`` and ``_b`` are same-shape operands, ``_ab`` and
        ``_axb`` their precomputed sum and product, ``_na`` the negation of
        ``_a``, ``_c`` a larger (n+1 mode) tensor, ``_hole`` the conjugated
        (lower/raise swapped) variant and ``_spinful`` a doubled-mode variant.
        """
        self.n_qubits = 2
        self.constant = 23.0
        # --- operand "a" (also kept as raw arrays for key-mixing tests) ---
        one_body_a = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body_a = numpy.zeros((self.n_qubits, self.n_qubits,
                                  self.n_qubits, self.n_qubits))
        one_body_a[0, 1] = 2
        one_body_a[1, 0] = 3
        two_body_a[0, 1, 0, 1] = 4
        two_body_a[1, 1, 0, 0] = 5
        self.one_body_a = one_body_a
        self.two_body_a = two_body_a
        self.polynomial_tensor_a = PolynomialTensor(
            {(): self.constant, (1, 0): one_body_a, (1, 1, 0, 0): two_body_a})
        # --- operand with different n_body keys than "a" ---
        self.one_body_operand = numpy.zeros((self.n_qubits, self.n_qubits))
        self.two_body_operand = numpy.zeros((self.n_qubits, self.n_qubits,
                                             self.n_qubits, self.n_qubits))
        self.one_body_operand[0, 1] = 6
        self.one_body_operand[1, 0] = 7
        self.two_body_operand[0, 1, 0, 1] = 8
        self.two_body_operand[1, 1, 0, 0] = 9
        self.polynomial_tensor_operand = PolynomialTensor(
            {(1, 0): self.one_body_operand,
             (0, 0, 1, 1): self.two_body_operand})
        # Same as "a" plus an extra all-zero 6-index term (equality must hold).
        self.polynomial_tensor_a_with_zeros = PolynomialTensor(
            {(): self.constant, (1, 0): one_body_a, (1, 1, 0, 0): two_body_a,
             (1, 1, 0, 0, 0, 0): numpy.zeros([self.n_qubits] * 6)})
        # --- negation of "a" ---
        one_body_na = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body_na = numpy.zeros((self.n_qubits, self.n_qubits,
                                   self.n_qubits, self.n_qubits))
        one_body_na[0, 1] = -2
        one_body_na[1, 0] = -3
        two_body_na[0, 1, 0, 1] = -4
        two_body_na[1, 1, 0, 0] = -5
        self.polynomial_tensor_na = PolynomialTensor(
            {(): -self.constant, (1, 0): one_body_na,
             (1, 1, 0, 0): two_body_na})
        # --- operand "b" ---
        one_body_b = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body_b = numpy.zeros((self.n_qubits, self.n_qubits,
                                  self.n_qubits, self.n_qubits))
        one_body_b[0, 1] = 1
        one_body_b[1, 0] = 2
        two_body_b[0, 1, 0, 1] = 3
        two_body_b[1, 0, 0, 1] = 4
        self.polynomial_tensor_b = PolynomialTensor(
            {(): self.constant, (1, 0): one_body_b,
             (1, 1, 0, 0): two_body_b})
        # --- expected element-wise sum a + b ---
        one_body_ab = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body_ab = numpy.zeros((self.n_qubits, self.n_qubits,
                                   self.n_qubits, self.n_qubits))
        one_body_ab[0, 1] = 3
        one_body_ab[1, 0] = 5
        two_body_ab[0, 1, 0, 1] = 7
        two_body_ab[1, 0, 0, 1] = 4
        two_body_ab[1, 1, 0, 0] = 5
        self.polynomial_tensor_ab = PolynomialTensor(
            {(): 2.0 * self.constant, (1, 0): one_body_ab,
             (1, 1, 0, 0): two_body_ab})
        # --- expected element-wise product a * b ---
        constant_axb = self.constant * self.constant
        one_body_axb = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body_axb = numpy.zeros((self.n_qubits, self.n_qubits,
                                    self.n_qubits, self.n_qubits))
        one_body_axb[0, 1] = 2
        one_body_axb[1, 0] = 6
        two_body_axb[0, 1, 0, 1] = 12
        self.polynomial_tensor_axb = PolynomialTensor(
            {(): constant_axb, (1, 0): one_body_axb,
             (1, 1, 0, 0): two_body_axb})
        # --- tensor "c" with one extra mode (shape-mismatch tests) ---
        self.n_qubits_plus_one = self.n_qubits + 1
        one_body_c = numpy.zeros((self.n_qubits_plus_one,
                                  self.n_qubits_plus_one))
        two_body_c = numpy.zeros((self.n_qubits_plus_one,
                                  self.n_qubits_plus_one,
                                  self.n_qubits_plus_one,
                                  self.n_qubits_plus_one))
        one_body_c[0, 1] = 1
        one_body_c[1, 0] = 2
        two_body_c[0, 1, 0, 1] = 3
        two_body_c[1, 0, 0, 1] = 4
        self.polynomial_tensor_c = PolynomialTensor(
            {(): self.constant, (1, 0): one_body_c,
             (1, 1, 0, 0): two_body_c})
        # --- same values as "a" but with raise/lower keys swapped ---
        one_body_hole = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body_hole = numpy.zeros((self.n_qubits, self.n_qubits,
                                     self.n_qubits, self.n_qubits))
        one_body_hole[0, 1] = 2
        one_body_hole[1, 0] = 3
        two_body_hole[0, 1, 0, 1] = 4
        two_body_hole[1, 1, 0, 0] = 5
        self.polynomial_tensor_hole = PolynomialTensor(
            {(): self.constant, (0, 1): one_body_hole,
             (0, 0, 1, 1): two_body_hole})
        # --- spinful (doubled mode count) variant ---
        one_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))
        two_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits,
                                        2 * self.n_qubits, 2 * self.n_qubits))
        one_body_spinful[0, 1] = 2
        one_body_spinful[1, 0] = 3
        one_body_spinful[2, 3] = 6
        one_body_spinful[3, 2] = 7
        two_body_spinful[0, 1, 0, 1] = 4
        two_body_spinful[1, 1, 0, 0] = 5
        two_body_spinful[2, 1, 2, 3] = 8
        two_body_spinful[3, 3, 2, 2] = 9
        self.polynomial_tensor_spinful = PolynomialTensor(
            {(): self.constant, (1, 0): one_body_spinful,
             (1, 1, 0, 0): two_body_spinful})
    def test_setitem_1body(self):
        """__setitem__ with one (raise, lower) pair writes the (1, 0) tensor."""
        expected_one_body_tensor = numpy.array([[0, 3], [2, 0]])
        self.polynomial_tensor_a[(0, 1), (1, 0)] = 3
        self.polynomial_tensor_a[(1, 1), (0, 0)] = 2
        self.assertTrue(numpy.allclose(
            self.polynomial_tensor_a.n_body_tensors[(1, 0)],
            expected_one_body_tensor))
    def test_getitem_1body(self):
        """__getitem__ reads one-body entries set in setUp for tensor c."""
        self.assertEqual(self.polynomial_tensor_c[(0, 1), (1, 0)], 1)
        self.assertEqual(self.polynomial_tensor_c[(1, 1), (0, 0)], 2)
    def test_setitem_2body(self):
        """__setitem__ with two pairs writes the (1, 1, 0, 0) tensor."""
        self.polynomial_tensor_a[(0, 1), (1, 1), (1, 0), (0, 0)] = 3
        self.polynomial_tensor_a[(1, 1), (0, 1), (0, 0), (1, 0)] = 2
        self.assertEqual(
            self.polynomial_tensor_a.n_body_tensors[
                (1, 1, 0, 0)][0, 1, 1, 0], 3)
        self.assertEqual(
            self.polynomial_tensor_a.n_body_tensors[
                (1, 1, 0, 0)][1, 0, 0, 1], 2)
    def test_getitem_2body(self):
        """__getitem__ reads two-body entries set in setUp for tensor c."""
        self.assertEqual(
            self.polynomial_tensor_c[(0, 1), (1, 1), (0, 0), (1, 0)], 3)
        self.assertEqual(
            self.polynomial_tensor_c[(1, 1), (0, 1), (0, 0), (1, 0)], 4)
    def test_invalid_getitem_indexing(self):
        """Reading with an odd number of index pairs raises KeyError."""
        with self.assertRaises(KeyError):
            self.polynomial_tensor_a[(0, 1), (1, 1), (0, 0)]
    def test_invalid_setitem_indexing(self):
        """Writing with an odd number of index pairs raises KeyError."""
        test_tensor = copy.deepcopy(self.polynomial_tensor_a)
        with self.assertRaises(KeyError):
            test_tensor[(0, 1), (1, 1), (0, 0)] = 5
    def test_eq(self):
        """Equality is reflexive, key-order aware, and ignores all-zero terms."""
        self.assertEqual(self.polynomial_tensor_a,
                         self.polynomial_tensor_a)
        self.assertNotEqual(self.polynomial_tensor_a,
                            self.polynomial_tensor_hole)
        self.assertNotEqual(self.polynomial_tensor_a,
                            self.polynomial_tensor_spinful)
        # OK to have different keys if arrays for differing keys are 0-arrays
        self.assertEqual(self.polynomial_tensor_a,
                         self.polynomial_tensor_a_with_zeros)
        self.assertEqual(self.polynomial_tensor_a_with_zeros,
                         self.polynomial_tensor_a)
    def test_ne(self):
        """Tensors with different entries compare unequal."""
        self.assertNotEqual(self.polynomial_tensor_a,
                            self.polynomial_tensor_b)
    def test_add(self):
        """a + b equals the precomputed element-wise sum fixture."""
        new_tensor = self.polynomial_tensor_a + self.polynomial_tensor_b
        self.assertEqual(new_tensor, self.polynomial_tensor_ab)
    def test_iadd(self):
        """In-place += matches the precomputed sum fixture."""
        new_tensor = copy.deepcopy(self.polynomial_tensor_a)
        new_tensor += self.polynomial_tensor_b
        self.assertEqual(new_tensor, self.polynomial_tensor_ab)
    def test_invalid_addend(self):
        """Adding a non-PolynomialTensor raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a + 2
    def test_invalid_tensor_shape_add(self):
        """Adding tensors with mismatched mode counts raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a + self.polynomial_tensor_c
    def test_different_keys_add(self):
        """Addition unions the key sets; shared keys are summed element-wise."""
        result = self.polynomial_tensor_a + self.polynomial_tensor_operand
        expected = PolynomialTensor(
            {(): self.constant,
             (1, 0): numpy.add(self.one_body_a, self.one_body_operand),
             (1, 1, 0, 0): self.two_body_a,
             (0, 0, 1, 1): self.two_body_operand})
        self.assertEqual(result, expected)
    def test_neg(self):
        """Unary minus negates every term including the constant."""
        self.assertEqual(-self.polynomial_tensor_a,
                         self.polynomial_tensor_na)
    def test_sub(self):
        """(a + b) - b recovers a."""
        new_tensor = self.polynomial_tensor_ab - self.polynomial_tensor_b
        self.assertEqual(new_tensor, self.polynomial_tensor_a)
    def test_isub(self):
        """In-place -= recovers a from the sum fixture."""
        new_tensor = copy.deepcopy(self.polynomial_tensor_ab)
        new_tensor -= self.polynomial_tensor_b
        self.assertEqual(new_tensor, self.polynomial_tensor_a)
    def test_invalid_subtrahend(self):
        """Subtracting a non-PolynomialTensor raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a - 2
    def test_invalid_tensor_shape_sub(self):
        """Subtracting tensors with mismatched mode counts raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a - self.polynomial_tensor_c
    def test_different_keys_sub(self):
        """Subtraction unions key sets; only shared keys are differenced."""
        result = self.polynomial_tensor_a - self.polynomial_tensor_operand
        expected = PolynomialTensor(
            {(): self.constant,
             (1, 0): numpy.subtract(self.one_body_a, self.one_body_operand),
             (1, 1, 0, 0): self.two_body_a,
             (0, 0, 1, 1): self.two_body_operand})
        self.assertEqual(result, expected)
    def test_mul(self):
        """Tensor*tensor is element-wise; scalar multiplication scales terms
        and commutes, consistent with the FermionOperator conversion."""
        new_tensor = self.polynomial_tensor_a * self.polynomial_tensor_b
        self.assertEqual(new_tensor, self.polynomial_tensor_axb)
        new_tensor_1 = self.polynomial_tensor_a * 2.
        new_tensor_2 = 2. * self.polynomial_tensor_a
        self.assertEqual(new_tensor_1, PolynomialTensor(
            {(): self.constant * 2.,
             (1, 0): self.one_body_a * 2.,
             (1, 1, 0, 0): self.two_body_a * 2.}))
        self.assertEqual(new_tensor_2, PolynomialTensor(
            {(): self.constant * 2.,
             (1, 0): self.one_body_a * 2.,
             (1, 1, 0, 0): self.two_body_a * 2.}))
        self.assertEqual(get_fermion_operator(new_tensor_1),
                         get_fermion_operator(self.polynomial_tensor_a) * 2.)
        self.assertEqual(get_fermion_operator(new_tensor_2),
                         get_fermion_operator(self.polynomial_tensor_a) * 2.)
    def test_imul(self):
        """In-place *= matches the precomputed element-wise product."""
        new_tensor = copy.deepcopy(self.polynomial_tensor_a)
        new_tensor *= self.polynomial_tensor_b
        self.assertEqual(new_tensor, self.polynomial_tensor_axb)
    def test_invalid_multiplier(self):
        """Multiplying by an unsupported type raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a * 'a'
    def test_invalid_tensor_shape_mult(self):
        """Multiplying tensors with mismatched mode counts raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a * self.polynomial_tensor_c
    def test_different_keys_mult(self):
        """Multiplication keeps only keys common to both operands."""
        result = self.polynomial_tensor_a * self.polynomial_tensor_operand
        expected = PolynomialTensor(
            {(1, 0): numpy.multiply(self.one_body_a, self.one_body_operand)})
        self.assertEqual(result, expected)
    def test_div(self):
        """Scalar division scales every term and the operator conversion."""
        new_tensor = self.polynomial_tensor_a / 2.
        self.assertEqual(new_tensor, PolynomialTensor(
            {(): self.constant / 2.,
             (1, 0): self.one_body_a / 2.,
             (1, 1, 0, 0): self.two_body_a / 2.}))
        self.assertEqual(get_fermion_operator(new_tensor),
                         get_fermion_operator(self.polynomial_tensor_a) / 2.)
    def test_idiv(self):
        """In-place /= scales every term and the operator conversion."""
        new_tensor = copy.deepcopy(self.polynomial_tensor_a)
        new_tensor /= 3.
        self.assertEqual(new_tensor, PolynomialTensor(
            {(): self.constant / 3.,
             (1, 0): self.one_body_a / 3.,
             (1, 1, 0, 0): self.two_body_a / 3.}))
        self.assertEqual(get_fermion_operator(new_tensor),
                         get_fermion_operator(self.polynomial_tensor_a) / 3.)
    def test_invalid_dividend(self):
        """Dividing by an unsupported type raises TypeError."""
        with self.assertRaises(TypeError):
            self.polynomial_tensor_a / 'a'
    def test_iter_and_str(self):
        """str() and repr() list nonzero terms, one per line."""
        one_body = numpy.zeros((self.n_qubits, self.n_qubits))
        two_body = numpy.zeros((self.n_qubits, self.n_qubits,
                                self.n_qubits, self.n_qubits))
        one_body[0, 1] = 11.0
        two_body[0, 1, 1, 0] = 22.0
        polynomial_tensor = PolynomialTensor(
            {(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
        want_str = ('() 23.0\n((0, 1), (1, 0)) 11.0\n'
                    '((0, 1), (1, 1), (1, 0), (0, 0)) 22.0\n')
        self.assertEqual(str(polynomial_tensor), want_str)
        self.assertEqual(polynomial_tensor.__repr__(), want_str)
def test_rotate_basis_identical(self):
rotation_matrix_identical = numpy.zeros((self.n_qubits, self.n_qubits))
rotation_matrix_identical[0, 0] = 1
rotation_matrix_identical[1, 1] = 1
one_body = numpy.zeros((self.n_qubits, self.n_qubits))
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))
two_body_spinful = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits,
2 * self.n_qubits, 2 * self.n_qubits))
i = 0
j = 0
for p in range(self.n_qubits):
for q in range(self.n_qubits):
one_body[p, q] = i
one_body_spinful[p, q] = i
one_body_spinful[p + self.n_qubits, q + self.n_qubits] = i
i = i + 1
for r in range(self.n_qubits):
for s in range(self.n_qubits):
two_body[p, q, r, s] = j
two_body_spinful[p, q, r, s] = j
two_body_spinful[p + self.n_qubits,
q + self.n_qubits,
r + self.n_qubits,
s + self.n_qubits] = j
j = j + 1
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
want_polynomial_tensor_spinful = PolynomialTensor(
{(): self.constant, (1, 0): one_body_spinful,
(1, 1, 0, 0): two_body_spinful})
polynomial_tensor.rotate_basis(rotation_matrix_identical)
polynomial_tensor_spinful.rotate_basis(rotation_matrix_identical)
self.assertEqual(polynomial_tensor, want_polynomial_tensor)
self.assertEqual(polynomial_tensor_spinful,
want_polynomial_tensor_spinful)
def test_rotate_basis_reverse(self):
rotation_matrix_reverse = numpy.zeros((self.n_qubits, self.n_qubits))
rotation_matrix_reverse[0, 1] = 1
rotation_matrix_reverse[1, 0] = 1
one_body = numpy.zeros((self.n_qubits, self.n_qubits))
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
one_body_reverse = numpy.zeros((self.n_qubits, self.n_qubits))
two_body_reverse = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits))
i = 0
j = 0
i_reverse = pow(self.n_qubits, 2) - 1
j_reverse = pow(self.n_qubits, 4) - 1
for p in range(self.n_qubits):
for q in range(self.n_qubits):
one_body[p, q] = i
i = i + 1
one_body_reverse[p, q] = i_reverse
i_reverse = i_reverse - 1
for r in range(self.n_qubits):
for s in range(self.n_qubits):
two_body[p, q, r, s] = j
j = j + 1
two_body_reverse[p, q, r, s] = j_reverse
j_reverse = j_reverse - 1
polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body, (1, 1, 0, 0): two_body})
want_polynomial_tensor = PolynomialTensor(
{(): self.constant, (1, 0): one_body_reverse,
(1, 1, 0, 0): two_body_reverse})
polynomial_tensor.rotate_basis(rotation_matrix_reverse)
self.assertEqual(polynomial_tensor, want_polynomial_tensor)
    def test_rotate_basis_quadratic_hamiltonian_real(self):
        """Diagonalization round-trip for a real quadratic Hamiltonian."""
        self.do_rotate_basis_quadratic_hamiltonian(True)
    def test_rotate_basis_quadratic_hamiltonian_complex(self):
        """Diagonalization round-trip for a complex quadratic Hamiltonian."""
        self.do_rotate_basis_quadratic_hamiltonian(False)
def do_rotate_basis_quadratic_hamiltonian(self, real):
"""Test diagonalizing a quadratic Hamiltonian that conserves particle
number."""
n_qubits = 5
# Initialize a particle-number-conserving quadratic Hamiltonian
# and compute its orbital energies
quad_ham = random_quadratic_hamiltonian(n_qubits, True, real=real)
orbital_energies, constant = quad_ham.orbital_energies()
# Rotate a basis where the Hamiltonian is diagonal
diagonalizing_unitary = quad_ham.diagonalizing_bogoliubov_transform()
quad_ham.rotate_basis(diagonalizing_unitary.T)
# Check that the rotated Hamiltonian is diagonal with the correct
# orbital energies
D = numpy.zeros((n_qubits, n_qubits), dtype=complex)
D[numpy.diag_indices(n_qubits)] = orbital_energies
self.assertTrue(numpy.allclose(quad_ham.combined_hermitian_part, D))
# Check that the new Hamiltonian still conserves particle number
self.assertTrue(quad_ham.conserves_particle_number)
# Check that the orbital energies and constant are the same
new_orbital_energies, new_constant = quad_ham.orbital_energies()
self.assertTrue( | numpy.allclose(orbital_energies, new_orbital_energies) | numpy.allclose |
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import spikewarp as sw
"""
Class and helpers for main clustering meta analyses
"""
class MetaClusterAnalysisHolder(object):
def __init__(self, shuffle_option_string, is_mainz=True):
self.shuffle_option_string = shuffle_option_string
self.suf = "_" + shuffle_option_string
self.is_mainz = is_mainz
self.pdds = {}
self.sdds = {}
for data_name in sw.list_of_first_stage_data_names:
self.pdds.update({data_name: []})
for data_name in sw.list_of_second_stage_data_names:
self.sdds.update({data_name: []})
self.final_angled_cluster_count = 0
self.did_contribute_atleast_one_final_angled_cluster_count = 0
self.all_both_spiking_reliabilities = []; self.all_both_spiking_reliabilities_0s_removed = []
self.all_number_of_conjunctive_trials = []; self.all_number_of_conjunctive_trials_0s_removed = []
def extend_standard_cluster_arrays(self, single_clustering):
if (single_clustering.do_use_clusters_in_analysis):
self.final_angled_cluster_count += single_clustering.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += single_clustering.was_first_single_clustering_to_pass_for_pair
for key in single_clustering.primary_data_dicts.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(single_clustering.primary_data_dicts[key])
for key in single_clustering.secondary_data_dicts.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(single_clustering.secondary_data_dicts[key])
def extend_standard_cluster_arrays_using_another_mcah(self, mcah):
self.final_angled_cluster_count += mcah.final_angled_cluster_count
self.did_contribute_atleast_one_final_angled_cluster_count += mcah.did_contribute_atleast_one_final_angled_cluster_count
for key in mcah.pdds.keys():
if (key not in self.pdds.keys()):
self.pdds[key] = []
self.pdds[key].extend(mcah.pdds[key])
for key in mcah.sdds.keys():
if (key not in self.sdds.keys()):
self.sdds[key] = []
self.sdds[key].extend(mcah.sdds[key])
def calculate_time_span_info_and_plots(self, directory_holder, cortical_onset, time_window_following_cortical_onset, end_of_spiking_activity):
    """Plot cluster time-span summaries and write LaTeX tags describing what
    fraction of Stage 2 clusters fall within the analysis time window.

    directory_holder: provides output directory paths.
    cortical_onset: activation onset time (ms).
    time_window_following_cortical_onset: analysis window length (ms).
    end_of_spiking_activity: currently unused by this method.
    """
    sdds = self.sdds
    pdds = self.pdds
    dh = directory_holder
    suf = self.suf
    # Truncate/create the LaTeX tag file; tags are appended further below.
    tex_tag_file_name = dh.collated_root_output_directory + "AnalysisOutputLatexTimeSpan.tex"
    with open(tex_tag_file_name, "w") as tex_file:
        print(f"", file=tex_file)
    # Cluster Time Spans
    sw.basic_x_y_plot([pdds['FlatClusterStats_FlatCluster_FS_Mean0']], [pdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "PrimaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
    sw.basic_x_y_plot([sdds['FlatClusterStats_FlatCluster_FS_Mean0']], [sdds['FlatClusterStats_FlatCluster_FS_Mean1']], dh.clus_time_spans_dir + "SecondaryClusterMeans" + suf, s=4, draw_y_equals_x=True, y_equals_x_max=100, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10])
    sw.basic_x_y_plot([2.0*np.hstack((pdds['FlatClusterStats_FlatCluster_N0_FS_SD'], pdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((pdds['FlatClusterStats_FlatCluster_FS_Mean0'], pdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "PrimaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
    sw.basic_x_y_plot([2.0*np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))], [np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))], dh.clus_time_spans_dir + "SecondaryClusterMeans_VS_2sds" + suf, s=4, x_axis_label='ms', y_axis_label='ms', scatter_point_color_groups=['g'], custom_x_tick_locators=[50, 10], opt_x_and_y_max=[40.0, 100.0], y_axis_on_right=False)
    # Cluster extents: mean +/- 4 SD for both neurons of each secondary cluster.
    secondary_flat_cluster_means = np.hstack((sdds['FlatClusterStats_FlatCluster_FS_Mean0'], sdds['FlatClusterStats_FlatCluster_FS_Mean1']))
    # NOTE(review): pre_limits is computed but never used below — confirm intent.
    secondary_flat_cluster_pre_limits = secondary_flat_cluster_means - 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
    secondary_flat_cluster_post_limits = secondary_flat_cluster_means + 4.0 * np.hstack((sdds['FlatClusterStats_FlatCluster_N0_FS_SD'], sdds['FlatClusterStats_FlatCluster_N1_FS_SD']))
    sw.normal_histo_plot([secondary_flat_cluster_post_limits], dh.clus_time_spans_dir + "LimitsOfFlatClustersForAngledClustersOnly" + suf, bins=20, histo_range=[0.0, 100.0], x_axis_label="ms", y_axis_label="Frequency", custom_x_tick_locators=[100.0, 10.0], custom_y_tick_locators=[10.0, 10.0], alpha=0.78, add_chi_squared_text=True)
    # Fraction of cluster upper limits that fall inside the analysis window.
    time_threshold = cortical_onset + time_window_following_cortical_onset
    num_before = np.sum(secondary_flat_cluster_post_limits < time_threshold)
    num_after = np.sum(secondary_flat_cluster_post_limits > time_threshold)
    percent_before = 100.0 * float(num_before) / float(num_after + num_before)
    percent_before_string = "{:.{}f}".format(percent_before, 1)
    data_part = percent_before_string + "\\%"
    cluster_time_span_string = "As " + data_part + " of Stage 2 clusters extracted over 90ms following cortical activation onset lied within " + str(int(time_window_following_cortical_onset)) + "ms following onset (Supplementary Fig. 12), analysis was constrained to spikes in the first " + str(int(time_window_following_cortical_onset)) + "ms following activation onset. "
    sw.append_new_tag(data_part, "ClusterTimeSpanSummaryNum", tex_tag_file_name)
    sw.append_new_tag(cluster_time_span_string, "ClusterTimeSpanSummary", tex_tag_file_name)
def plot_p_value_histos(self, directory_holder, do_extra_plots=False):
sdds = self.sdds
pdds = self.pdds
dh = directory_holder
suf = self.suf
plot_all_lag_histograms = False
if (do_extra_plots):
plot_all_lag_histograms = True
tex_tag_file_name = dh.collated_root_output_directory + suf + "AnalysisOutputLatex.tex"
with open(tex_tag_file_name, "w") as tex_file:
print(f"", file=tex_file)
specific_prim_clus_corr_dir = dh.prim_clus_corr_dir + suf + "/"; sw.makedirs(specific_prim_clus_corr_dir)
specific_sec_clus_corr_dir = dh.sec_clus_corr_dir + suf + "/"; sw.makedirs(specific_sec_clus_corr_dir)
# Cluster Correlations Primary
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_ZoomHist", bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[30, 30], alpha=0.78, add_chi_squared_text=True)
flat_cluster_correlations_chi_squared_table_strings_array = sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_CumHist", bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "PVal_LowResHist", bins=40, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 100], alpha=0.78, add_chi_squared_text=True)
sw.cumulative_histo_plot([pdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_prim_clus_corr_dir + "LowRes_LowResCumHist", bins=20, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", add_chi_squared_text=True)
if ('FlatClusterStats_FlatCluster_LR_rsquared' in sdds.keys()):
# Cluster Correlations Secondary
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared'], sdds['FlatClusterStats_FlatCluster_LR_rvalue']], specific_sec_clus_corr_dir + "RVal_Hist", bins=40, histo_range=[-1.0, 1.0], x_axis_left_buffer=0.01, x_axis_label="$r$, $r^2$", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[50, 10], alpha=0.78)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_LR_rsquared']], specific_sec_clus_corr_dir + "R^2_Hist", colors=['g'], bins=20, x_axis_left_buffer=0.01, x_axis_label="r^2-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[20, 20])
cluster_p_minus_unclustered_conj_p = np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])
num_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p < 0.0)
num_not_improved_by_clustering = np.sum(cluster_p_minus_unclustered_conj_p >= 0.0)
percent_improved_by_clustering = 100.0 * float(num_improved_by_clustering) / float(num_improved_by_clustering + num_not_improved_by_clustering)
percent_improved_by_clustering_string = "{:.{}f}".format(percent_improved_by_clustering, 1)
num_non_significant_before_clustering = np.sum(np.asarray(sdds['Unclustered_Conj_LR_pvalue']) > 0.05)
num_sdd_clusters = len(sdds['Unclustered_Conj_LR_pvalue'])
percent_non_significant_before_clustering = 100.0*(num_non_significant_before_clustering/num_sdd_clusters)
percent_non_significant_before_clustering_string = "{:.{}f}".format(percent_non_significant_before_clustering, 1)
sw.basic_x_y_plot([sdds['Unclustered_Conj_LR_pvalue']], [sdds['FlatClusterStats_FlatCluster_LR_pvalue']], specific_sec_clus_corr_dir + "NonConjPVal_Vs_ClusPVal", draw_y_equals_x=True, y_equals_x_max=1.0, x_axis_label='p-value', y_axis_label='p-value', scatter_point_color_groups=['b'], custom_x_tick_locators=[1.0, 0.2], dashes=(8, 2))
sw.normal_histo_plot([sdds['Unclustered_Conj_LR_pvalue']], specific_sec_clus_corr_dir + "ConjPVal_Vs_ClusPVal", bins=20, histo_range=[0.0, 1.0], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
sw.normal_histo_plot([np.asarray(sdds['FlatClusterStats_FlatCluster_LR_pvalue']) - np.asarray(sdds['Unclustered_Conj_LR_pvalue'])], specific_sec_clus_corr_dir + "ClusPVal_Minus_ConjPVal_Hist", bins=21, histo_range=[-1.0, 0.05], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[1.0, 0.2], custom_y_tick_locators=[10, 10], alpha=0.78)
# Cluster Differences Correlations
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_ZoomHist" + suf, bins=20, histo_range=[0.0, 0.1], x_axis_label="p-value", y_axis_label="Frequency", custom_x_tick_locators=[0.1, 0.01], custom_y_tick_locators=[200, 200], alpha=0.78, add_chi_squared_text=True)
differences_chi_squared_table_strings_array = sw.cumulative_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_CumHist" + suf, bins=200, x_axis_label="p-value", y_axis_label="Normalised\ncumulative sum", custom_x_tick_locators=[1.0, 0.2], add_chi_squared_text=True)
sw.normal_histo_plot([sdds['FlatClusterStats_FlatCluster_Diff_LR_pvalue']], dh.clus_pair_differences_dir + "FS0_Vs_Diff_LR_PVal_LowResHist" + suf, bins=20, x_axis_label="p-value", y_axis_label="Frequency", custom_y_tick_locators=[100, 20], alpha=0.78, add_chi_squared_text=True)
# Cluster Correlation Summary Latex
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])) + " Stage 1 clusters were extracted", "NumStage1ClustersFullString", tex_tag_file_name)
sw.append_new_tag(str(len(pdds['FlatClusterStats_FlatCluster_LR_pvalue'])), "NumStage1ClustersData", tex_tag_file_name)
cluster_correlation_string0 = "Spike pairs within Stage 1 cluster ellipses were linearly correlated above chance levels (Fisher's method: " + flat_cluster_correlations_chi_squared_table_strings_array[0] + ")"
sw.append_new_tag(cluster_correlation_string0, "Stage1ClusterFisherFullString", tex_tag_file_name)
sw.append_new_tag(flat_cluster_correlations_chi_squared_table_strings_array[0], "Stage1ClusterFisherData", tex_tag_file_name)
cluster_correlation_string0p1 = "spike pair differences were correlated with the spike time of the first neuron in the pair for Stage 2 clusters (Fisher's method: " + differences_chi_squared_table_strings_array[0] + "; Fig. 3g), shows that correlations are not explained by a model of the form $s_1 = s_0 + d + independent\\_noise$ where $d$ is a fixed difference."
sw.append_new_tag(cluster_correlation_string0p1, "ClusterCorrelationSummary0p1", tex_tag_file_name)
num_greaterthan = np.sum(np.asarray(sdds['FlatClusterStats_FlatCluster_LR_rvalue']) > 0.0)
data_part = sw.percent_and_frac_string(num_greaterthan, self.final_angled_cluster_count)
cluster_correlation_string1 = data_part + " of Stage 2 clusters were positively correlated "
sw.append_new_tag(cluster_correlation_string1, "Stage2PositivelyCorrelatedFullString", tex_tag_file_name)
sw.append_new_tag(data_part, "Stage2PositivelyCorrelatedNum", tex_tag_file_name)
cluster_correlation_string2 = percent_improved_by_clustering_string + "\\% (" + str(num_improved_by_clustering) + "/" + str(num_improved_by_clustering + num_not_improved_by_clustering) + ") of the Stage 2 clusters had correlations of higher significance than correlations calculated for all unclustered first spike pairs in the originating response distribution (Fig. 3h). Moreover, " + percent_non_significant_before_clustering_string + "\\% (" + str(num_non_significant_before_clustering) + '/' + str(num_sdd_clusters) + ") of the original response distributions from which Stage 2 clusters were extracted were not correlated significantly (p>0.05) (Fig. 3h). "
sw.append_new_tag(cluster_correlation_string2, "ClusterCorrelationSummary2", tex_tag_file_name)
angled_clusters_unique_pairs_summary_string = "A total of " + str(self.final_angled_cluster_count) + " unique Stage 2 clusters were extracted from " + str(self.did_contribute_atleast_one_final_angled_cluster_count) + " unique response distributions." #, confirming that there were no repeated or similar clusters."
sw.append_new_tag(angled_clusters_unique_pairs_summary_string, "AngledClustersUniquePairsSummary", tex_tag_file_name)
# Angle Comparisons
sw.basic_x_y_plot([sdds["Original" + '_BS_PCA_mean_angle']], [sdds["SelectivelyDifferencedBoxJenkins" + '_FA_angle_BS_mean']], dh.angle_analysis_directory + "BS_PCA_VS_SelectivelyDifferencedBoxJenkins_FA_Angles" + suf, draw_y_equals_x=True, y_equals_x_max=90, x_axis_label='Degrees', y_axis_label='Degrees', s=4, scatter_point_color_groups=['g'], custom_x_tick_locators=[90, 10])
# Cluster Reliabilities
sw.plot_cluster_reliability_plots(sdds['PCA_ellipse_overall_reliability'], sdds['PCA_ellipse_conj_reliability'], dh.cluster_reliabilities_dir, suf)
analysis_dict_keys= ['Original', 'OriginalTestsPassed', "SelectivelyDifferenced", "SelectivelyDifferencedTestsPassedActuallyDifferenced", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferencedBoxJenkinsTestsPassed"]
if ('analysis_dict_member_keys' in sdds.keys()):
analysis_dict_member_keys = sdds['analysis_dict_member_keys']
for analysis_dict_key in analysis_dict_keys:
# Directories
specific_angle_analysis_dir = dh.angle_analysis_directory + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_angle_analysis_dir)
specific_nonstationarity_dir = dh.clus_non_stationarity_dir + analysis_dict_key + "/" + suf + "/"; sw.makedirs(specific_nonstationarity_dir)
sharipo_normality_specific_nonstationarity_dir = specific_nonstationarity_dir + "SharipoNormality/"; sw.makedirs(sharipo_normality_specific_nonstationarity_dir)
KPSS_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "KPSSStationarity/"; sw.makedirs(KPSS_stationarity_specific_nonstationarity_dir)
ADF_stationarity_specific_nonstationarity_dir = specific_nonstationarity_dir + "ADFStationarity/"; sw.makedirs(ADF_stationarity_specific_nonstationarity_dir)
LR_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRStationarity/"; sw.makedirs(LR_specific_nonstationarity_dir)
HZ_specific_nonstationarity_dir = specific_nonstationarity_dir + "HZStationarity/"; sw.makedirs(HZ_specific_nonstationarity_dir)
bartlett_specific_nonstationarity_dir = specific_nonstationarity_dir + "BartlettSphericity/"; sw.makedirs(bartlett_specific_nonstationarity_dir)
specific_lag_pvals_nonstationary_dir = specific_nonstationarity_dir + "LagPVals/"; sw.makedirs(specific_lag_pvals_nonstationary_dir)
LR_correlation_specific_nonstationarity_dir = specific_nonstationarity_dir + "LRCorrelation/"; sw.makedirs(LR_correlation_specific_nonstationarity_dir)
true_where_tests_not_passed_ORIGINAL = np.asarray(sdds['Original_tests_passed'])
num_tests_not_passed_ORIGINAL = np.sum(true_where_tests_not_passed_ORIGINAL == False)
if (analysis_dict_key in ["Original", "SelectivelyDifferencedBoxJenkins", "SelectivelyDifferenced"]):
num_for_type = np.sum(np.bitwise_not(np.asarray(sdds[analysis_dict_key + '_is_empty'])))
true_where_normal = np.asarray(sdds[analysis_dict_key + '_normal'])
num_normal = np.sum(true_where_normal)
where_normal = np.where(true_where_normal)
true_where_tests_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
num_tests_passed = np.sum(true_where_tests_passed)
where_tests_passed = np.where(true_where_tests_passed)
true_where_tests_not_passed = np.asarray(sdds[analysis_dict_key + '_tests_passed'])
num_tests_not_passed = np.sum(true_where_tests_not_passed == False)
true_where_tests_passed_and_normal = np.asarray(sdds[analysis_dict_key + '_tests_passed_and_normal'])
num_tests_passed_and_normal = np.sum(true_where_tests_passed_and_normal)
where_tests_passed_and_normal = np.where(true_where_tests_passed_and_normal)
true_where_correlated = np.asarray(sdds[analysis_dict_key + '_is_still_correlated'])
number_correlated = np.sum(true_where_correlated)
where_correlated = np.where(true_where_correlated)
true_where_tests_passed_and_correlated = np.logical_and(true_where_correlated, true_where_tests_passed)
num_tests_passed_and_correlated = np.sum(true_where_tests_passed_and_correlated)
where_tests_passed_and_correlated = np.where(true_where_tests_passed_and_correlated)
where_different_from_45 = np.logical_and(np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45']), np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_0']))
num_different_from_45 = np.sum(where_different_from_45)
true_where_correlated_and_different_from_45 = np.logical_and(true_where_correlated, np.asarray(sdds[analysis_dict_key + '_is_PCA_BS_empirical_pvalue_different_from_45']))
num_correlated_and_different_from_45 = np.sum(true_where_correlated_and_different_from_45)
where_correlated_and_different_from_45 = np.where(true_where_correlated_and_different_from_45)
true_where_correlated_and_different_from_45_tests_passed = | np.logical_and(true_where_correlated_and_different_from_45, true_where_tests_passed) | numpy.logical_and |
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
# Stellar label names (LaTeX) and their units for the panels plotted below.
names = ['T_{eff}', '\log g', '[Fe/H]', '[\\alpha/Fe]', 'AKWISE']
units = ['K', 'dex', 'dex', 'dex', 'dex']
# Per-label histogram ranges, split into nbins equal-width bins.
mins = np.array([3900, 0.7, -1.6, 0.0, -0.1])
maxs = np.array([5300, 3.9, 0.3, 0.3, 0.4])
nbins = 10
chunks = (maxs-mins)/nbins
bins = mins[:,None]+np.arange(nbins)[None,:]*chunks[:,None] # (nlabel, nbin)
# NOTE(review): each reassignment of `direc` overrides the previous one —
# only "../run_9b_reddening" is actually used; confirm this is intended.
direc_orig = "../run_2_train_on_good"
direc = "../run_9_more_metal_poor"
direc = "../run_9b_reddening"
# Cannon-inferred labels, training IDs/SNR, and reference (APOGEE) labels.
all_cannon = np.load("%s/all_cannon_labels.npz" %direc)['arr_0']
all_ids = np.load("%s/all_ids.npz" %direc_orig)['arr_0']
#all_apogee = np.load("%s/all_label.npz" %direc_orig)['arr_0']
good_id = np.load("%s/tr_id.npz" %direc)['arr_0']
snr = np.load("%s/tr_snr.npz" %direc)['arr_0']
#choose = np.array([np.where(all_ids==val)[0][0] for val in good_id])
#apogee = all_apogee[choose]
apogee = np.load("%s/tr_label.npz" %direc)['arr_0']
cannon = all_cannon
# One panel per label in a 3x2 grid.
fig = plt.figure(figsize=(10,8))
gs = gridspec.GridSpec(3,2, wspace=0.3, hspace=0.3)
for i in range(0, len(names)):
name = names[i]
unit = units[i]
low = mins[i]
high = maxs[i]
val = cannon[:,i]-apogee[:,i]
y = np.zeros(nbins)
yerr = | np.zeros(y.shape) | numpy.zeros |
import numpy as np
from scipy.spatial.distance import cdist
def cmeans(data, c, h, error, maxiter, metric='euclidean', init=None, seed=None):
    """
    Fuzzy c-means clustering algorithm [1], with the fuzzifier exponent
    replaced by an entropy-regularisation coefficient ``h`` (see ``_uij``).
    Parameters
    ----------
    data : 2d array, size (S, N)
        Data to be clustered. N is the number of data sets; S is the number
        of features within each sample vector.
    c : int
        Desired number of clusters or classes.
    h : float
        Regularisation coefficient used in the membership update; replaces
        the classical fuzzifier ``m`` of standard fuzzy c-means.
    error : float
        Stopping criterion; stop early if the norm of (u[p] - u[p-1]) < error.
    maxiter : int
        Maximum number of iterations allowed.
    metric: string
        By default is set to euclidean. Passes any option accepted by
        ``scipy.spatial.distance.cdist``.
    init : 2d array, size (c, N)
        Initial fuzzy c-partitioned matrix. If none provided, algorithm is
        randomly initialized.
    seed : int
        If provided, sets random seed of init. No effect if init is
        provided. Mainly for debug/testing purposes.
    Returns
    -------
    cntr : 2d array, size (c, S)
        Cluster centers. Data for each center along each feature provided
        for every cluster (of the `c` requested clusters).
    u : 2d array, (c, N)
        Final fuzzy c-partitioned matrix.
    u0 : 2d array, (c, N)
        Initial guess at fuzzy c-partitioned matrix (either provided init or
        random guess used if init was not provided).
    d : 2d array, (c, N)
        Final Euclidian distance matrix.
    jm : 1d array, length P
        Objective function history.
    p : int
        Number of iterations run.
    fpc : float
        Final fuzzy partition coefficient.
    Notes
    -----
    The algorithm implemented is from Ross et al. [1]_.
    Fuzzy C-Means has a known problem with high dimensionality datasets, where
    the majority of cluster centers are pulled into the overall center of
    gravity. If you are clustering data with very high dimensionality and
    encounter this issue, another clustering method may be required. For more
    information and the theory behind this, see Winkler et al. [2]_.
    References
    ----------
    .. [1] <NAME>. Fuzzy Logic With Engineering Applications, 3rd ed.
           Wiley. 2010. ISBN 978-0-470-74376-8 pp 352-353, eq 10.28 - 10.35.
    .. [2] <NAME>., <NAME>., & <NAME>. Fuzzy c-means in high
           dimensional spaces. 2012. Contemporary Theory and Pragmatic
           Approaches in Fuzzy Computing Utilization, 1.
    """
    # Setup u0
    # Initialize the cluster partition matrix randomly if not provided.
    if init is None:
        if seed is not None:
            np.random.seed(seed=seed)
        n = data.shape[1]
        u0 = np.random.rand(c, n)
        u0 = normalize_columns(u0)
        init = u0.copy()
    u0 = init
    u = np.fmax(u0, np.finfo(np.float64).eps)  # clamp to machine epsilon; avoids zeros/divide-by-zero
    # Compute the cluster priors pi_i (mean membership of each cluster).
    s = np.sum(u, axis=1, keepdims=True)/u.shape[1]
    # Initialize loop parameters
    jm = np.zeros(0)
    p = 0
    # Main cmeans loop
    # NOTE(review): if maxiter <= 1 the loop body never runs, so u2/d/cntr
    # below are undefined (NameError) — confirm callers pass maxiter >= 2.
    while p < maxiter - 1:
        u2 = u.copy()
        s0 = s.copy()
        [cntr, u, Jjm , d, s] = _cmeans0(data, u2, c, h, s0, metric)
        jm = np.hstack((jm, Jjm))
        p += 1
        # Stopping rule
        if np.linalg.norm(u - u2) < error:
            break
    # Final calculations
    error = np.linalg.norm(u - u2)
    fpc = _fp_coeff(u)
    return cntr, u, u0, d, jm, p, fpc
def _cmeans0(data, u_old, c, h, s, metric):
    """
    Single step of the regularised fuzzy c-means update.
    Adapted from Ross, Fuzzy Logic w/Engineering Applications (2010),
    pages 352-353, equations 10.28 - 10.35.
    Returns (centers, memberships, objective, distances, priors);
    parameters are as documented for cmeans().
    """
    eps = np.finfo(np.float64).eps
    # Renormalise the previous memberships and clamp away exact zeros.
    memberships = np.fmax(normalize_columns(u_old), eps)
    # Cluster priors pi_i: mean membership of each cluster, as a (c, 1) column.
    priors = np.fmax(memberships.sum(axis=1, keepdims=True) / memberships.shape[1], eps)
    um = memberships
    # Work with samples as rows: (N, S).
    samples = data.T
    # Membership-weighted means of the samples give the cluster centres.
    centers = um.dot(samples) / np.atleast_2d(um.sum(axis=1)).T
    # Distances from every centre to every sample, clamped away from zero.
    dists = np.fmax(_distance(samples, centers, metric), eps)
    objective = (um * dists ** 2).sum()
    # Regularised membership update (replaces the classical u_old**m rule).
    new_memberships = _uij(dists, priors, h)
    return centers, new_memberships, objective, dists, priors
'''
Replace the fuzzifier m with a regularization coefficient:
1. First compute the priors pi_i.
2. Then derive the memberships from pi_i.
3. Finally update the cluster centers.
'''
def _uij(d, s, h):
    '''
    Membership update for regularised fuzzy c-means.
    :param d: (c, N) distance matrix from each cluster centre to each sample
    :param s: (c, 1) cluster priors pi_i
    :param h: regularisation coefficient (controls the entropy term)
    :return: (c, N) column-normalised membership matrix
    '''
    # s (c, 1) broadcasts across the N columns of d — no need to materialise
    # repeated copies with .repeat() as the original implementation did.
    tmp = s * np.exp(d / (-h))
    tmp = np.fmax(tmp, np.finfo(np.float64).eps)  # clamp to machine epsilon
    # Normalise each column so memberships sum to one per sample; the (1, N)
    # column sums broadcast over the c rows.
    u = tmp / np.sum(tmp, axis=0, keepdims=True)
    u = normalize_columns(u)
    return u
def _fp_coeff(u):
"""
Fuzzy partition coefficient `fpc` relative to fuzzy c-partitioned
matrix `u`. Measures 'fuzziness' in partitioned clustering.
Parameters
----------
u : 2d array (C, N)
Fuzzy c-partitioned matrix; N = number of data points and C = number
of clusters.
Returns
-------
fpc : float
Fuzzy partition coefficient.
"""
n = u.shape[1]
return np.trace(u.dot(u.T)) / float(n)
def _distance(data, centers, metric='euclidean'):
"""
Euclidean distance from each point to each cluster center.
Parameters
----------
data : 2d array (N x Q)
Data to be analyzed. There are N data points.
centers : 2d array (C x Q)
Cluster centers. There are C clusters, with Q features.
metric: string
By default is set to euclidean. Passes any option accepted by
``scipy.spatial.distance.cdist``.
Returns
-------
dist : 2d array (C x N)
Euclidean distance from each point, to each cluster center.
See Also
--------
scipy.spatial.distance.cdist
"""
return cdist(data, centers, metric=metric).T
"""
_normalize_columns.py : Normalize columns.
"""
# import numpy as np
def normalize_columns(columns):
    """
    Scale each column of ``columns`` (M x N) so that it sums to one.

    Returns
    -------
    2d array (M x N)
        Equivalent to ``columns / np.sum(columns, axis=0, keepdims=1)``.
    """
    # keepdims keeps the sums as a (1, N) row so they broadcast over rows.
    column_totals = np.sum(columns, axis=0, keepdims=1)
    return columns / column_totals
def normalize_power_columns(x, exponent):
"""
Calculate normalize_columns(x**exponent)
in a numerically safe manner.
Parameters
----------
x : 2d array (M x N)
Matrix with columns
n : float
Exponent
Returns
-------
result : 2d array (M x N)
normalize_columns(x**n) but safe
"""
assert np.all(x >= 0.0)
x = x.astype(np.float64)
# values in range [0, 1]
x = x / np.max(x, axis=0, keepdims=True)
# values in range [eps, 1]
x = np.fmax(x, | np.finfo(x.dtype) | numpy.finfo |
"""
Code for training and evaluating Pytorch models.
"""
from torch.nn.modules.loss import MarginRankingLoss, CrossEntropyLoss
from torch.optim.lr_scheduler import ReduceLROnPlateau
import logging
import numpy as np
import os
import pprint
import time
import torch
import torch.optim as optim
import models
import utils
CUDA = torch.cuda.is_available()
CONFIG = utils.read_config()
LOGGER = logging.getLogger(os.path.basename(__file__))
def calc_losses(y_hats, y, out_dims):
    """
    Calculate the loss for every prediction task and stack the predictions.

    Regression tasks (out_dim == 1) use a margin ranking loss over every
    unique pair of predictions; classification tasks (out_dim > 1) use
    cross entropy. Predictions are returned as one concatenated tensor.
    TODO: this should be a class?
    """
    reg_loss = MarginRankingLoss()
    clf_loss = CrossEntropyLoss()
    if CUDA:
        reg_loss = reg_loss.cuda()
        clf_loss = clf_loss.cuda()
    losses, predictions = [], []
    for task_idx, out_dim in enumerate(out_dims):
        task_y_hat = y_hats[task_idx]
        task_y = y[:, task_idx]
        if out_dim == 1:
            # Regression: ranking loss needs all pairwise orderings.
            pair_ranks = get_paired_ranks(task_y)
            first_hat, second_hat = get_pairs(task_y_hat)
            losses.append(reg_loss(first_hat, second_hat, pair_ranks))
            predictions.append(task_y_hat)
        elif out_dim > 1:
            # Classification: cross entropy against integer class labels,
            # argmax over logits as the reported prediction.
            losses.append(clf_loss(task_y_hat, task_y.long()))
            _, class_preds = torch.max(task_y_hat.data, 1)
            predictions.append(class_preds.float().unsqueeze(1))
    predictions = torch.cat(predictions, dim=1)
    return (losses, predictions)
def get_pairs(y):
    """
    For an input vector ``y``, return tensors (y1, y2) such that y1 - y2
    enumerates every unique pairwise subtraction of elements of ``y``.
    """
    y = y.cpu()
    n = len(y)
    # Strict lower triangle: each (row, col) with row > col is one unique
    # pair; rows select y2 and columns select y1 (matching get_paired_ranks).
    pair_rows, pair_cols = np.where(np.tril(np.ones((n, n)), k=-1))
    y1 = y[torch.LongTensor(pair_cols)]
    y2 = y[torch.LongTensor(pair_rows)]
    if CUDA:
        y1 = y1.cuda()
        y2 = y2.cuda()
    return (y1, y2)
def get_paired_ranks(y):
"""
Generate y_rank (for margin ranking loss). If `y == 1` then it assumed the
first input should be ranked higher (have a larger value) than the second
input, and vice-versa for `y == -1`.
"""
y = y.cpu()
# Calculates all pairwise subtractions.
y_rank = y[np.newaxis, :] - y[:, np.newaxis]
# Edge case where the difference between 2 points is 0.
y_rank[y_rank == 0] = 1e-19
# Get the lower triangle of y_rank (ignoring the diagonal).
idx = np.where( | np.tril(y_rank, k=-1) | numpy.tril |
# Created by zenn at 2021/5/6
import torch
import os
import copy
import numpy as np
from pyquaternion import Quaternion
from datasets.data_classes import PointCloud
from scipy.spatial.distance import cdist
def random_choice(num_samples, size, replacement=False, seed=None):
    """Draw ``num_samples`` indices uniformly from range(size) via torch.

    NOTE(review): seeding goes through torch.random.manual_seed, which also
    resets the global default generator state — confirm that side effect is
    intended.
    """
    generator = None if seed is None else torch.random.manual_seed(seed)
    # Uniform weights make multinomial a plain uniform index sampler.
    uniform_weights = torch.ones((size), dtype=torch.float32)
    return torch.multinomial(
        uniform_weights,
        num_samples=num_samples,
        replacement=replacement,
        generator=generator
    )
def regularize_pc(points, sample_size, seed=None):
    """Randomly resample ``points`` (N, 3) to exactly ``sample_size`` rows.

    Returns (resampled_points, chosen_indices). When fewer than 3 points are
    available the cloud is replaced by float32 zeros and the indices are None.
    """
    rng = np.random if seed is None else np.random.default_rng(seed)
    num_points = points.shape[0]
    if num_points <= 2:
        # Too few points to be useful: return an all-zero placeholder cloud.
        return np.zeros((sample_size, 3), dtype='float32'), None
    if num_points == sample_size:
        chosen = np.arange(num_points)
    else:
        # Sample with replacement only when we need more points than exist.
        chosen = rng.choice(num_points, size=sample_size,
                            replace=sample_size > num_points)
    return points[chosen, :], chosen
def getOffsetBB(box, offset, degrees=True, use_z=False, limit_box=True):
    """Return a copy of ``box`` perturbed by ``offset`` in the box's own frame.

    offset: sequence of length 3 (x, y, rotation) or 4 (x, y, z, rotation);
    the rotation is about the z axis, in degrees or radians per ``degrees``.
    A 3-element offset forces use_z=False (its third entry is the rotation).
    NOTE(review): when limit_box is True, out-of-range x/y offsets are
    MUTATED IN PLACE on the caller's ``offset`` list with fresh random
    values — confirm callers tolerate this side effect.
    """
    rot_quat = Quaternion(matrix=box.rotation_matrix)
    trans = np.array(box.center)
    new_box = copy.deepcopy(box)
    # Move the box into its own canonical (centred, axis-aligned) frame.
    new_box.translate(-trans)
    new_box.rotate(rot_quat.inverse)
    if len(offset) == 3:
        use_z = False
    # REMOVE TRANSFORM
    # Apply the requested yaw; the rotation entry is offset[2] for length-3
    # offsets and offset[3] for length-4 offsets.
    if degrees:
        if len(offset) == 3:
            new_box.rotate(
                Quaternion(axis=[0, 0, 1], degrees=offset[2]))
        elif len(offset) == 4:
            new_box.rotate(
                Quaternion(axis=[0, 0, 1], degrees=offset[3]))
    else:
        if len(offset) == 3:
            new_box.rotate(
                Quaternion(axis=[0, 0, 1], radians=offset[2]))
        elif len(offset) == 4:
            new_box.rotate(
                Quaternion(axis=[0, 0, 1], radians=offset[3]))
    if limit_box:
        # Clamp translations that would leave the box footprint: resample
        # x/y uniformly in [-1, 1] and zero z if it exceeds the box height.
        if offset[0] > new_box.wlh[0]:
            offset[0] = np.random.uniform(-1, 1)
        if offset[1] > min(new_box.wlh[1], 2):
            offset[1] = np.random.uniform(-1, 1)
        if use_z and offset[2] > new_box.wlh[2]:
            offset[2] = 0
    if use_z:
        new_box.translate(np.array([offset[0], offset[1], offset[2]]))
    else:
        new_box.translate(np.array([offset[0], offset[1], 0]))
    # APPLY PREVIOUS TRANSFORMATION
    new_box.rotate(rot_quat)
    new_box.translate(trans)
    return new_box
def getModel(PCs, boxes, offset=0, scale=1.0, normalize=False):
    """Center and merge the object point clouds cropped by ``boxes``.

    Each PC is cropped and centred with its box, and non-empty crops are
    concatenated into one PointCloud. Returns (merged_PC, last_new_box).
    NOTE(review): when PCs is empty this returns a single PointCloud
    instead of the usual 2-tuple — callers that unpack two values will
    fail on empty input; confirm whether this asymmetry is intended.
    """
    if len(PCs) == 0:
        return PointCloud(np.ones((3, 0)))
    # Seed with an empty (D, 0) array so concatenate works even if every
    # crop turns out to be empty.
    points = [np.ones((PCs[0].points.shape[0], 0), dtype='float32')]
    for PC, box in zip(PCs, boxes):
        cropped_PC, new_box = cropAndCenterPC(PC, box, offset=offset, scale=scale, normalize=normalize)
        # try:
        if cropped_PC.nbr_points() > 0:
            points.append(cropped_PC.points)
    PC = PointCloud(np.concatenate(points, axis=1))
    # new_box here is the box from the final loop iteration.
    return PC, new_box
def cropAndCenterPC(PC, box, offset=0, scale=1.0, normalize=False):
    """
    Crop ``PC`` around ``box`` and express both in the box's own frame.

    A generous axis-aligned pre-crop (2x offset, 4x scale) keeps the cost of
    the subsequent rotation low; the precise crop happens after centring.
    Returns (cropped_centred_PC, centred_box).
    """
    # Coarse pre-crop with widened bounds before any transformation.
    coarse_pc = crop_pc_axis_aligned(PC, box, offset=2 * offset, scale=4 * scale)
    centred_box = copy.deepcopy(box)
    inv_rot = np.transpose(centred_box.rotation_matrix)
    shift = -centred_box.center
    # Translate then rotate so the box sits centred and axis-aligned.
    coarse_pc.translate(shift)
    centred_box.translate(shift)
    coarse_pc.rotate(inv_rot)
    centred_box.rotate(Quaternion(matrix=inv_rot))
    # Final tight crop in the canonical frame.
    result_pc = crop_pc_axis_aligned(coarse_pc, centred_box, offset=offset, scale=scale)
    if normalize:
        result_pc.normalize(box.wlh)
    return result_pc, centred_box
def get_point_to_box_distance(pc, box):
    """
    Generate the BoxCloud for the given pc and box: the distance from every
    point to the box centre and each of its eight corners.
    :param pc: PointCloud object or (N, 3) numpy array
    :param box: box providing ``center`` and ``corners()``
    :return: (N, 9) distance matrix; column 0 is the centre distance
    """
    points = pc.points.T if isinstance(pc, PointCloud) else pc  # N,3
    assert points.shape[1] == 3
    # Stack the centre first, then the eight corners: shape (3, 9).
    reference_pts = np.concatenate(
        [box.center.reshape(-1, 1), box.corners()], axis=1)
    return cdist(points, reference_pts.T)  # N,9
def crop_pc_axis_aligned(PC, box, offset=0, scale=1.0, return_mask=False):
    """
    Crop ``PC`` to the axis-aligned bounding volume of the scaled box,
    padded by ``offset`` on every side.
    Returns the cropped PointCloud, plus the boolean keep-mask when
    ``return_mask`` is True.
    """
    scaled_box = copy.deepcopy(box)
    scaled_box.wlh = scaled_box.wlh * scale
    upper = np.max(scaled_box.corners(), 1) + offset
    lower = np.min(scaled_box.corners(), 1) - offset
    # Strict inequalities on every axis, matching the original behaviour.
    keep = np.ones(PC.points.shape[1], dtype=bool)
    for axis in range(3):
        keep = np.logical_and(keep, PC.points[axis, :] > lower[axis])
        keep = np.logical_and(keep, PC.points[axis, :] < upper[axis])
    cropped = PointCloud(PC.points[:, keep])
    if return_mask:
        return cropped, keep
    return cropped
def crop_pc_oriented(PC, box, offset=0, scale=1.0, return_mask=False):
    """
    Crop the pc using the exact (oriented) box: points are moved into the
    box frame, filtered against the scaled/padded bounds, then moved back.
    Slower than 'crop_pc_axis_aligned' but more accurate.
    Returns the cropped PointCloud (in the original frame), plus the
    boolean keep-mask when ``return_mask`` is True.
    """
    box_tmp = copy.deepcopy(box)
    new_PC = PointCloud(PC.points.copy())
    rot_mat = np.transpose(box_tmp.rotation_matrix)
    trans = -box_tmp.center
    # align data: translate then rotate so the box is centred, axis-aligned
    new_PC.translate(trans)
    box_tmp.translate(trans)
    new_PC.rotate(rot_mat)
    box_tmp.rotate(Quaternion(matrix=rot_mat))
    # Scale the box and pad its bounds by `offset` on every side.
    box_tmp.wlh = box_tmp.wlh * scale
    maxi = np.max(box_tmp.corners(), 1) + offset
    mini = np.min(box_tmp.corners(), 1) - offset
    # Per-axis strict inequality filters, ANDed together.
    x_filt_max = new_PC.points[0, :] < maxi[0]
    x_filt_min = new_PC.points[0, :] > mini[0]
    y_filt_max = new_PC.points[1, :] < maxi[1]
    y_filt_min = new_PC.points[1, :] > mini[1]
    z_filt_max = new_PC.points[2, :] < maxi[2]
    z_filt_min = new_PC.points[2, :] > mini[2]
    close = np.logical_and(x_filt_min, x_filt_max)
    close = np.logical_and(close, y_filt_min)
    close = np.logical_and(close, y_filt_max)
    close = np.logical_and(close, z_filt_min)
    close = np.logical_and(close, z_filt_max)
    new_PC = PointCloud(new_PC.points[:, close])
    # transform back to the original coordinate system
    new_PC.rotate(np.transpose(rot_mat))
    new_PC.translate(-trans)
    if return_mask:
        return new_PC, close
    return new_PC
def generate_subwindow(pc, sample_bb, scale, offset=2, oriented=True):
    """Generate the search area around ``sample_bb``.

    :param pc: full point cloud
    :param sample_bb: reference bounding box
    :param scale: scale factor for the crop region
    :param offset: margin added around the crop region
    :param oriented: use oriented (True) or axis-aligned (False) cropping
    :return: cropped point cloud expressed in the frame of ``sample_bb``
    """
    inv_rot = np.transpose(sample_bb.rotation_matrix)
    shift = -sample_bb.center
    if oriented:
        aligned_pc = PointCloud(pc.points.copy())
        aligned_box = copy.deepcopy(sample_bb)
        # Express cloud and box in the coordinate system of sample_bb first;
        # the oriented crop then reduces to an axis-aligned one.
        aligned_pc.translate(shift)
        aligned_box.translate(shift)
        aligned_pc.rotate(inv_rot)
        aligned_box.rotate(Quaternion(matrix=inv_rot))
        result = crop_pc_axis_aligned(aligned_pc, aligned_box, scale=scale, offset=offset)
    else:
        # Crop in the world frame, then move the result into the frame of sample_bb.
        result = crop_pc_axis_aligned(pc, sample_bb, scale=scale, offset=offset)
        result.translate(shift)
        result.rotate(inv_rot)
    return result
def transform_box(box, ref_box):
    """Return a copy of ``box`` expressed in the local frame of ``ref_box``."""
    local_box = copy.deepcopy(box)
    local_box.translate(-ref_box.center)
    local_box.rotate(Quaternion(matrix=ref_box.rotation_matrix.T))
    return local_box
def get_in_box_mask(PC, box):
"""check which points of PC are inside the box"""
box_tmp = copy.deepcopy(box)
new_PC = PointCloud(PC.points.copy())
rot_mat = np.transpose(box_tmp.rotation_matrix)
trans = -box_tmp.center
# align data
new_PC.translate(trans)
box_tmp.translate(trans)
new_PC.rotate(rot_mat)
box_tmp.rotate(Quaternion(matrix=rot_mat))
maxi = np.max(box_tmp.corners(), 1)
mini = np.min(box_tmp.corners(), 1)
x_filt_max = new_PC.points[0, :] < maxi[0]
x_filt_min = new_PC.points[0, :] > mini[0]
y_filt_max = new_PC.points[1, :] < maxi[1]
y_filt_min = new_PC.points[1, :] > mini[1]
z_filt_max = new_PC.points[2, :] < maxi[2]
z_filt_min = new_PC.points[2, :] > mini[2]
close = np.logical_and(x_filt_min, x_filt_max)
close = | np.logical_and(close, y_filt_min) | numpy.logical_and |
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
import featuretools as ft
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.feature_base import DirectFeature, Feature
from featuretools.primitives import (
AggregationPrimitive,
Day,
Hour,
Minute,
Month,
Second,
TransformPrimitive,
Year
)
from featuretools.primitives.utils import PrimitivesDeserializer
from featuretools.synthesis import dfs
from featuretools.variable_types import Categorical, Datetime, Numeric
def test_direct_from_identity(es):
    """A DirectFeature on an identity feature propagates the parent's values."""
    device_type = es['sessions']['device_type']
    direct = DirectFeature(base_feature=device_type, child_entity=es['log'])
    feature_set = FeatureSet([direct])
    calc = FeatureSetCalculator(es, feature_set=feature_set, time_last=None)
    result = calc.run(np.array([0, 5]))
    if isinstance(result, dd.DataFrame):
        result = result.compute().set_index('id').sort_index()
    assert result[direct.get_name()].tolist() == [0, 1]
def test_direct_from_variable(es):
# should be same behavior as test_direct_from_identity
device = es['sessions']['device_type']
d = DirectFeature(base_feature=device,
child_entity=es['log'])
feature_set = FeatureSet([d])
calculator = FeatureSetCalculator(es, feature_set=feature_set, time_last=None)
df = calculator.run( | np.array([0, 5]) | numpy.array |
import numpy as np
import gym
from gym import spaces
from numpy.random import default_rng
import pickle
import os
import math
import matplotlib.pyplot as plt
from PIL import Image
from gym_flp import rewards
from IPython.display import display, clear_output
import anytree
from anytree import Node, RenderTree, PreOrderIter, LevelOrderIter, LevelOrderGroupIter
'''
v0.0.3
Significant changes:
08.09.2020:
- Discrete option removed from spaces; only Box allowed
- Classes for quadratic set covering and mixed integer programming (-ish) added
- Episodic tasks: no more terminal states (exception: max. no. of trials reached)
12.10.2020:
- mip added
- fbs added
'''
class qapEnv(gym.Env):
    """Quadratic Assignment Problem (QAP) environment.

    The agent permutes facility assignments via pairwise exchanges and is
    rewarded when the material-handling cost (MHC) of the resulting
    permutation improves on the best cost seen so far in the episode.
    Observations are either the raw permutation vector ('human' mode) or a
    1 x n RGB strip encoding permutation/flow information ('rgb_array').
    """
    metadata = {'render.modes': ['rgb_array', 'human']}
    def __init__(self, mode=None, instance=None):
        # Load the distance and flow matrices shipped with the package.
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        self.DistanceMatrices, self.FlowMatrices = pickle.load(open(os.path.join(__location__,'discrete', 'qap_matrices.pkl'), 'rb'))
        self.transport_intensity = None
        self.instance = instance
        self.mode = mode
        # Interactively prompt until a known problem instance is chosen.
        while not (self.instance in self.DistanceMatrices.keys() or self.instance in self.FlowMatrices.keys() or self.instance in ['Neos-n6', 'Neos-n7', 'Brewery']):
            print('Available Problem Sets:', self.DistanceMatrices.keys())
            self.instance = input('Pick a problem:').strip()
        self.D = self.DistanceMatrices[self.instance]
        self.F = self.FlowMatrices[self.instance]
        # Determine problem size relevant for much stuff in here:
        self.n = len(self.D[0])
        # Action space has two option:
        # 1) Define as Box with shape (1, 2) and allow values to range from 1 through self.n
        # 2) Define as Discrete with x = 1+((n^2-n)/2) actions (one half of matrix + 1 value from diagonal) --> Omit "+1" to obtain range from 0 to x!
        # self.action_space = spaces.Box(low=-1, high=6, shape=(1,2), dtype=np.int) # Doubles complexity of the problem as it allows the identical action (1,2) and (2,1)
        self.action_space = spaces.Discrete(int((self.n**2-self.n)*0.5)+1)
        # If you are using images as input, the input values must be in [0, 255] as the observation is normalized (dividing by 255 to have values in [0, 1]) when using CNN policies.
        if self.mode == "rgb_array":
            self.observation_space = spaces.Box(low = 0, high = 255, shape=(1, self.n, 3), dtype = np.uint8) # Image representation
        elif self.mode == 'human':
            self.observation_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.float32)
        self.states = {} # Create an empty dictonary where states and their respective reward will be stored for future reference
        self.actions = self.pairwiseExchange(self.n)
        # Initialize Environment with empty state and action
        self.action = None
        self.state = None
        self.internal_state = None
        #Initialize moving target to incredibly high value. To be updated if reward obtained is smaller.
        self.movingTargetReward = np.inf
        self.MHC = rewards.mhc.MHC() # Create an instance of class MHC in module mhc.py from package rewards
    def reset(self):
        """Draw a fresh random permutation of facilities and return it.

        NOTE(review): in 'rgb_array' mode this returns the raw permutation
        vector, not an image, which looks inconsistent with the declared
        observation_space — confirm whether that is intended.
        """
        state = default_rng().choice(range(1,self.n+1), size=self.n, replace=False)
        #MHC, self.TM = self.MHC.compute(self.D, self.F, state)
        self.internal_state = state.copy()
        return state
    def step(self, action):
        """Apply a pairwise exchange, recompute MHC, and return (obs, reward, done, info).

        Reward is +10 when the new MHC does not exceed the best seen so far
        (the moving target), -1 otherwise; the episode never terminates here.
        """
        # Create new State based on action
        fromState = self.internal_state.copy()
        swap = self.actions[action]
        fromState[swap[0]-1], fromState[swap[1]-1] = fromState[swap[1]-1], fromState[swap[0]-1]
        newState = fromState.copy()
        #MHC, self.TM = self.MHC.compute(self.D, self.F, current_permutation)
        MHC, self.TM = self.MHC.compute(self.D, self.F, newState)
        if self.mode == 'human':
            self.states[tuple(fromState)] = MHC
        if self.movingTargetReward == np.inf:
            self.movingTargetReward = MHC
        #reward = self.movingTargetReward - MHC
        reward = -1 if MHC > self.movingTargetReward else 10
        self.movingTargetReward = MHC if MHC < self.movingTargetReward else self.movingTargetReward
        if self.mode == "rgb_array":
            # Encode permutation (R), outgoing flow (G) and incoming flow (B)
            # as a 1 x n RGB strip, each channel min-max scaled to [0, 255].
            rgb = np.zeros((1,self.n,3), dtype=np.uint8)
            sources = np.sum(self.TM, axis = 1)
            sinks = np.sum(self.TM, axis = 0)
            R = np.array((fromState-np.min(fromState))/(np.max(fromState)-np.min(fromState))*255).astype(int)
            G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
            B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
            for i, s in enumerate(fromState):
                rgb[0:1, i] = [R[s-1], G[s-1], B[s-1]]
            newState = np.array(rgb)
        self.state = newState.copy()
        self.internal_state = fromState.copy()
        return newState, reward, False, {}
    def render(self, mode=None):
        """Render the current assignment as a 1 x n RGB strip via matplotlib."""
        if self.mode == "human":
            SCALE = 1  # Scale size of pixels for displayability
            img_h, img_w = SCALE, (len(self.internal_state))*SCALE
            data = np.zeros((img_h, img_w, 3), dtype=np.uint8)
            sources = np.sum(self.TM, axis = 1)
            sinks = np.sum(self.TM, axis = 0)
            R = np.array((self.internal_state-np.min(self.internal_state))/(np.max(self.internal_state)-np.min(self.internal_state))*255).astype(int)
            G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
            B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
            for i, s in enumerate(self.internal_state):
                data[0*SCALE:1*SCALE, i*SCALE:(i+1)*SCALE] = [R[s-1], G[s-1], B[s-1]]
            img = Image.fromarray(data, 'RGB')
        if self.mode == 'rgb_array':
            img = Image.fromarray(self.state, 'RGB')
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        return img
    def close(self):
        pass
    def pairwiseExchange(self, x):
        """Enumerate all unordered facility pairs (1-based) plus the idle action (1,1)."""
        actions = [(i,j) for i in range(1,x) for j in range(i+1,x+1) if not i==j]
        actions.append((1,1))
        return actions
class fbsEnv(gym.Env):
    """Flexible Bay Structure (FBS) facility-layout environment.

    The layout is encoded by a facility permutation plus a binary bay-break
    vector; actions mutate either encoding (swap, bit flip, bay exchange,
    inversion, idle) and the reward is the material-handling cost (MHC) of
    the resulting layout. This is a continuous task: ``done`` is always False.
    """
    metadata = {'render.modes': ['rgb_array', 'human']}
    def __init__(self, mode=None, instance = None):
        # Load the continual-layout problem instances shipped with the package.
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
        self.mode = mode
        self.instance = instance
        # Interactively prompt until a known problem instance is chosen.
        while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
            print('Available Problem Sets:', self.FlowMatrices.keys())
            self.instance = input('Pick a problem:').strip()
        self.F = self.FlowMatrices[self.instance]
        self.n = self.problems[self.instance]
        self.AreaData = self.sizes[self.instance]
        # Obtain size data: FBS needs a length and area
        self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
        '''
        Nomenclature:
            W --> Width of Plant (y coordinate)
            L --> Length of Plant (x coordinate)
            w --> Width of facility/bay (x coordinate)
            l --> Length of facility/bay (y coordinate)
            A --> Area of Plant
            a --> Area of facility
            Point of origin analoguous to numpy indexing (top left corner of plant)
            beta --> aspect ratios (as alpha is reserved for learning rate)
        '''
        # Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
        if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
            self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image
            self.W = int(self.LayoutWidths[self.instance])
        else:
            self.A = np.sum(self.a)
            # Design a squared plant layout
            self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
            self.W = self.L
        # These values need to be set manually, e.g. acc. to data from literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility will occupy alone.
        self.aspect_ratio = int(max(self.beta)) if not self.beta is None else 1
        self.min_length = np.min(self.a) / self.L
        self.min_width = np.min(self.a) / self.W
        # We define minimum side lengths to be 1 in order to be displayable in array
        self.min_length = 1
        self.min_width = 1
        self.action_space = spaces.Discrete(5) #Taken from doi:10.1016/j.engappai.2020.103697
        self.actions = {0: 'Randomize', 1: 'Bit Swap', 2: 'Bay Exchange', 3: 'Inverse', 4: 'Idle'}
        # Binary vector indicating bay breaks (i = 1 means last facility in bay).
        # NOTE: dtype must be the builtin int — the np.int alias was removed in NumPy 1.24.
        self.bay_space = spaces.Box(low=0, high = 1, shape=(self.n,), dtype=int)
        self.state = None
        self.permutation = None # Permutation of all n facilities, read from top to bottom
        self.bay = None
        self.done = False
        self.MHC = rewards.mhc.MHC()
        if self.mode == "rgb_array":
            self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation
        elif self.mode == "human":
            observation_low = np.tile(np.array([0,0,self.min_length,self.min_width],dtype=int), self.n)
            observation_high = np.tile(np.array([self.W, self.L, self.W, self.L], dtype=int), self.n)
            self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = int) # Vector representation of coordinates
        else:
            print("Nothing correct selected")
    def reset(self):
        """Sample a random permutation and bay vector and return the initial observation."""
        # 1. Get a random permutation and bays
        self.permutation, self.bay = self.sampler()
        # 2. Last position in bay break vector has to be 1 by default.
        self.bay[-1] = 1
        self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates()
        self.D = getDistances(self.fac_x, self.fac_y)
        reward, self.TM = self.MHC.compute(self.D, self.F, self.permutation[:])
        self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n)
        return self.state
    def constructState(self, x, y, l, w, n):
        """Pack per-facility (y, x, w, l) into a flat vector, or an image in rgb mode."""
        state_prelim = np.zeros((4*n,), dtype=float)
        state_prelim[0::4] = y
        state_prelim[1::4] = x
        state_prelim[2::4] = w
        state_prelim[3::4] = l
        if self.mode == "human":
            self.state = np.array(state_prelim)
        elif self.mode == "rgb_array":
            self.state = self.ConvertCoordinatesToState(state_prelim)
        return self.state[:]
    def ConvertCoordinatesToState(self, state_prelim):
        """Rasterize the coordinate vector into a W x L RGB image.

        Channels encode the permutation (R), outgoing flow (G) and incoming
        flow (B), each min-max scaled to [0, 255].
        """
        data = np.zeros((self.observation_space.shape)) if self.mode == 'rgb_array' else np.zeros((self.W, self.L, 3),dtype=np.uint8)
        sources = np.sum(self.TM, axis = 1)
        sinks = np.sum(self.TM, axis = 0)
        R = np.array((self.permutation-np.min(self.permutation))/(np.max(self.permutation)-np.min(self.permutation))*255).astype(int)
        G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
        B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
        for x, p in enumerate(self.permutation):
            x_from = state_prelim[4*x+1] -0.5 * state_prelim[4*x+3]
            y_from = state_prelim[4*x+0] -0.5 * state_prelim[4*x+2]
            x_to = state_prelim[4*x+1] + 0.5 * state_prelim[4*x+3]
            y_to = state_prelim[4*x+0] + 0.5 * state_prelim[4*x+2]
            data[int(y_from):int(y_to), int(x_from):int(x_to)] = [R[p-1], G[p-1], B[p-1]]
        return np.array(data, dtype=np.uint8)
    def sampler(self):
        """Return a random facility permutation and a random bay-break vector."""
        return default_rng().choice(range(1,self.n+1), size=self.n, replace=False), self.bay_space.sample()
    def getCoordinates(self):
        """Compute facility center coordinates and side lengths from permutation + bays."""
        facilities = np.where(self.bay==1)[0] #Read all positions with a bay break
        bays = np.split(self.permutation, facilities[:-1]+1)
        lengths = np.zeros((len(self.permutation,)))
        widths = np.zeros((len(self.permutation,)))
        fac_x = np.zeros((len(self.permutation,)))
        fac_y = np.zeros((len(self.permutation,)))
        x = 0
        start = 0
        for b in bays: #Get the facilities that are located in the bay
            areas = self.a[b-1] #Get the area associated with the facilities
            end = start + len(areas)
            lengths[start:end] = np.sum(areas)/self.W #Calculate all facility widhts in bay acc. to Eq. (1) in https://doi.org/10.1016/j.eswa.2011.11.046
            widths[start:end] = areas/lengths[start:end]
            fac_x[start:end] = lengths[start:end] * 0.5 + x
            x += np.sum(areas)/self.W
            y = np.ones(len(b))
            ll = 0
            for idx, l in enumerate(widths[start:end]):
                y[idx] = ll + 0.5*l
                ll += l
            fac_y[start:end] = y
            start = end
        return fac_x, fac_y, lengths, widths
    def step(self, action):
        """Apply one of the five FBS mutations and return (obs, reward, done, info)."""
        a = self.actions[action]
        fromState = np.array(self.permutation)
        # Get lists with a bay positions and facilities in each bay
        facilities = np.where(self.bay==1)[0]
        bay_breaks = np.split(self.bay, facilities[:-1]+1)
        # Load indiv. facilities into bay acc. to breaks; omit break on last position to avoid empty array in list.
        bays = np.split(self.permutation, facilities[:-1]+1)
        if a == 'Randomize':
            # Two vector elements randomly chosen are exchanged. Bay vector remains untouched.
            k = default_rng().choice(range(len(self.permutation)), size=1, replace=False)
            l = default_rng().choice(range(len(self.permutation)), size=1, replace=False)
            fromState[k], fromState[l] = fromState[l], fromState[k]
            self.permutation = np.array(fromState)
        elif a == 'Bit Swap':
            # One element randomly selected flips its value (1 to 0 or 0 to 1)
            j = default_rng().choice(range(len(self.bay)), size=1, replace=False)
            temp_bay = np.array(self.bay) # Make a copy of bay
            temp_bay[j] = 1 if temp_bay[j] == 0 else 0
            self.bay = np.array(temp_bay)
        elif a == 'Bay Exchange':
            # Two bays are randomly selected and exchange facilities contained in them
            o = int(default_rng().choice(range(len(bays)), size=1, replace=False))
            p = int(default_rng().choice(range(len(bays)), size=1, replace=False))
            while p==o: # Make sure bays are not the same
                p = int(default_rng().choice(range(len(bays)), size=1, replace=False))
            # Swap bays and break points accordingly:
            bays[o], bays[p] = bays[p], bays[o]
            bay_breaks[o], bay_breaks[p] = bay_breaks[p], bay_breaks[o]
            new_bay = np.concatenate(bay_breaks)
            new_state = np.concatenate(bays)
            # Make sure state is saved as copy
            self.permutation = np.array(new_state)
            self.bay = np.array(new_bay)
        elif a == 'Inverse':
            # Facilities present in a certain bay randomly chosen are inverted.
            q = default_rng().choice(range(len(bays)))
            bays[q] = np.flip(bays[q])
            new_bay = np.concatenate(bay_breaks)
            new_state = np.concatenate(bays)
            # Make sure state is saved as copy
            self.permutation = np.array(new_state)
            self.bay = np.array(new_bay)
        elif a == 'Idle':
            pass # Keep old state
        self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates()
        self.D = getDistances(self.fac_x, self.fac_y)
        # NOTE(review): MHC is computed with fromState, which for 'Bay Exchange'
        # and 'Inverse' still holds the pre-action permutation while self.D
        # reflects the post-action layout — confirm whether self.permutation
        # should be used here instead.
        reward, self.TM = self.MHC.compute(self.D, self.F, fromState)
        self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n)
        self.done = False #Always false for continuous task
        return self.state[:], reward, self.done, {}
    def render(self, mode=None):
        """Render the current layout as an RGB image via matplotlib."""
        if self.mode == "human":
            # Mode 'human' needs intermediate step to convert state vector into image array
            data = self.ConvertCoordinatesToState(self.state[:])
            img = Image.fromarray(data, 'RGB')
        if self.mode == "rgb_array":
            data = self.state[:]
            img = Image.fromarray(self.state, 'RGB')
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        return img
    def close(self):
        pass
class ofpEnv(gym.Env):
    """Open Field Problem (OFP) facility-layout environment.

    Facilities move freely on a continuous W x L plant floor. Each facility
    has five actions (up/down/left/right/rotate) plus a global idle action.
    Reward is +50 when the material-handling cost improves on the episode
    best without collisions, -2 on collision; an episode ends after 200 steps.
    """
    metadata = {'render.modes': ['rgb_array', 'human']}
    def __init__(self, mode = None, instance = None, distance = None, aspect_ratio = None, step_size = None, greenfield = None):
        # Load the continual-layout problem instances shipped with the package.
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
        self.mode = mode
        self.aspect_ratio = 2 if aspect_ratio is None else aspect_ratio
        self.step_size = 2 if step_size is None else step_size
        self.greenfield = greenfield
        self.instance = instance
        # Interactively prompt until a known problem instance is chosen.
        while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
            print('Available Problem Sets:', self.FlowMatrices.keys())
            self.instance = input('Pick a problem:').strip()
        self.F = self.FlowMatrices[self.instance]
        self.n = self.problems[self.instance]
        self.AreaData = self.sizes[self.instance]
        self.counter = 0
        self.done = False
        self.pseudo_stability = 0 #If the reward has not improved in the last 200 steps, terminate the episode
        self.best_reward = None
        # Obtain size data: FBS needs a length and area
        self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
        '''
        Nomenclature:
            W --> Width of Plant (x coordinate)
            L --> Length of Plant (y coordinate)
            w --> Width of facility/bay (x coordinate)
            l --> Length of facility/bay (y coordinate)
            A --> Area of Plant
            a --> Area of facility
            Point of origin analoguous to numpy indexing (top left corner of plant)
            beta --> aspect ratios (as alpha is reserved for learning rate)
        '''
        # Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
        if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
            self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image
            self.W = int(self.LayoutWidths[self.instance])
        else:
            self.A = np.sum(self.a)
            # Design a squared plant layout
            self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
            self.W = self.L
            if self.greenfield:
                # Greenfield layouts get twice the floor space in both directions.
                self.L = 2*self.L
                self.W = 2*self.W
        # These values need to be set manually, e.g. acc. to data from literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility will occupy alone.
        self.aspect_ratio = int(max(self.beta)) if not self.beta is None else self.aspect_ratio
        self.min_length = 1
        self.min_width = 1
        # 3. Define the possible actions: 5 for each box [toDo: plus 2 to manipulate sizes] + 1 idle action for all
        self.actions = {}
        for i in range(self.n):
            self.actions[0+(i)*5] = "up"
            self.actions[1+(i)*5] = "down"
            self.actions[2+(i)*5] = "right"
            self.actions[3+(i)*5] = "left"
            self.actions[4+(i)*5] = "rotate"
        self.actions[len(self.actions)] = "keep"
        # 4. Define actions space as Discrete Space
        self.action_space = spaces.Discrete(1+5*self.n) #5 actions for each facility: left, up, down, right, rotate + idle action across all
        # 5. Set some starting points
        self.reward = 0
        self.state = None
        self.internal_state = None #Placeholder for state variable for internal manipulation in rgb_array mode
        if self.w is None or self.l is None:
            self.l = np.random.randint(self.min_side_length*self.aspect_ratio, np.min(self.a), size=(self.n, ))
            self.w = np.round(self.a/self.l)
        # 6. Set upper and lower bound for observation space
        # min x position can be point of origin (0,0) [coordinates map to upper left corner]
        # min y position can be point of origin (0,0) [coordinates map to upper left corner]
        # min width can be smallest area divided by its length
        # min lenght can be smallest width (above) multiplied by aspect ratio
        # max x pos can be bottom right edge of grid
        # max y pos can be bottpm right edge of grid
        if self.mode == "rgb_array":
            self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation
        elif self.mode == "human":
            # Bounds keep facility centers at least max(w)/max(l) away from the walls.
            observation_low = np.zeros(4* self.n)
            observation_high = np.zeros(4* self.n)
            observation_low[0::4] = max(self.w)
            observation_low[1::4] = max(self.l)
            observation_low[2::4] = max(self.w)
            observation_low[3::4] = max(self.l)
            observation_high[0::4] = self.W - max(self.w)
            observation_high[1::4] = self.L - max(self.l)
            observation_high[2::4] = self.W - max(self.w)
            observation_high[3::4] = self.L - max(self.l)
            # NOTE(review): dtype=np.uint8 silently truncates these continuous
            # bounds; a float dtype looks intended here — confirm before changing.
            self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = np.uint8) # Vector representation of coordinates
        else:
            print("Nothing correct selected")
        self.MHC = rewards.mhc.MHC()
        # Set Boundaries
        self.upper_bound = self.L- max(self.l)/2
        self.lower_bound = 0 + max(self.l)/2
        self.left_bound = 0 + max(self.w)/2
        self.right_bound = self.W- max(self.w)/2
    def reset(self):
        """Place all facilities at random positions and return the initial observation."""
        if self.mode == 'human':
            state_prelim = self.observation_space.sample()
            # Override length (l) and width (w) or facilities with data from instances
            state_prelim[2::4] = self.w
            state_prelim[3::4] = self.l
            self.D = getDistances(state_prelim[0::4], state_prelim[1::4])
            self.internal_state = np.array(state_prelim)
            self.state = np.array(state_prelim)
            reward, self.TM = self.MHC.compute(self.D, self.F, np.array(range(1,self.n+1)))
            self.counter = 0
            self.best_reward = reward
            self.reward = 0
        elif self.mode == 'rgb_array':
            x = np.random.uniform(0, self.L, size=(self.n,))
            y = np.random.uniform(0, self.W, size=(self.n,))
            s = np.zeros(4*self.n)
            s[0::4] = y
            s[1::4] = x
            s[2::4] = self.w
            s[3::4] = self.l
            self.internal_state = np.array(s).copy()
            self.D = getDistances(s[0::4], s[1::4])
            reward, self.TM = self.MHC.compute(self.D, self.F, np.array(range(1,self.n+1)))
            self.state = self.ConvertCoordinatesToState(self.internal_state)
            self.counter = 0
            self.best_reward = reward
        return self.state.copy()
    def collision(self,x,y,w,l):
        """Return True if any two facilities' bounding boxes overlap.

        Overlap requires closeness on both axes simultaneously; the check
        uses the instance side lengths (self.w/self.l), not the arguments.
        """
        collision = False
        for i in range(0,self.n-1):
            for j in range(i+1,self.n):
                if (abs(int(x[i]) - int(x[j])) < 0.5*self.w[i]+0.5*self.w[j]) and \
                   (abs(int(y[i]) - int(y[j])) < 0.5*self.l[i]+0.5*self.l[j]):
                    collision = True
        return collision
    def step(self, action):
        """Move/rotate the targeted facility and return (obs, reward, done, info)."""
        self.reward = 0
        # Facility on which the action is. Use builtin int: the np.int alias
        # was removed in NumPy 1.24 and raises AttributeError.
        m = int(np.ceil((action+1)/5))
        # Get copy of state to manipulate:
        temp_state = self.internal_state[:]
        step_size = self.step_size
        # Do the action, clamping at the plant boundaries.
        if self.actions[action] == "up":
            if temp_state[4*(m-1)+1] + temp_state[4*(m-1)+3]*0.5 + step_size < self.upper_bound:
                temp_state[4*(m-1)+1] += step_size
            else:
                temp_state[4*(m-1)+1] += 0
        elif self.actions[action] == "down":
            if temp_state[4*(m-1)+1] - temp_state[4*(m-1)+3]*0.5 + step_size > self.lower_bound:
                temp_state[4*(m-1)+1] -= step_size
            else:
                temp_state[4*(m-1)+1] += 0
        elif self.actions[action] == "right":
            if temp_state[4*(m-1)]+temp_state[4*(m-1)+2]*0.5 + step_size < self.right_bound:
                temp_state[4*(m-1)] += step_size
            else:
                temp_state[4*(m-1)] += 0
        elif self.actions[action] == "left":
            if temp_state[4*(m-1)]-temp_state[4*(m-1)+2]*0.5 + step_size > self.left_bound:
                temp_state[4*(m-1)] -= step_size
            else:
                temp_state[4*(m-1)] += 0
        elif self.actions[action] == "keep":
            pass #Leave everything as is
        elif self.actions[action] == "rotate":
            # Swap width and length of the facility.
            temp_state[4*(m-1)+2], temp_state[4*(m-1)+3] = temp_state[4*(m-1)+3], temp_state[4*(m-1)+2]
        else:
            raise ValueError("Received invalid action={} which is not part of the action space".format(action))
        self.fac_x, self.fac_y, self.fac_b, self.fac_h = temp_state[0::4], temp_state[1::4], temp_state[2::4], temp_state[3::4] # ToDo: Read this from self.state
        self.D = getDistances(self.fac_x, self.fac_y)
        fromState = np.array(range(1,self.n+1)) # Need to create permutation matrix
        MHC, self.TM = self.MHC.compute(self.D, self.F, fromState)
        self.internal_state = np.array(temp_state) # Keep a copy of the vector representation for future steps
        self.state = self.internal_state[:]
        # Test if the new state causes a collision.
        collision = self.collision(temp_state[0::4],temp_state[1::4], temp_state[2::4], temp_state[3::4]) # Pass every 4th item starting at 0 (x pos) and 1 (y pos) for checking
        if (MHC < self.best_reward) and (collision == False) :
            self.best_reward = MHC
            self.reward = 50
        if collision == True:
            self.reward = -2
        if self.mode == 'rgb_array':
            self.state = self.ConvertCoordinatesToState(self.internal_state) #Retain state for internal use
        self.pseudo_stability = self.counter
        self.done = True if self.pseudo_stability == 200 else False
        self.counter += 1
        return self.state,self.reward,self.done,{}
    def ConvertCoordinatesToState(self, state_prelim):
        """Rasterize the coordinate vector into a W x L RGB image.

        Channels encode facility index (R), outgoing flow (G) and incoming
        flow (B), each min-max scaled to [0, 255]; values <= 20 are raised
        to 255 for visibility.
        """
        data = np.zeros((self.observation_space.shape)) if self.mode == 'rgb_array' else np.zeros((self.W, self.L, 3),dtype=np.uint8)
        sources = np.sum(self.TM, axis = 1)
        sinks = np.sum(self.TM, axis = 0)
        p = np.arange(self.n)
        R = np.array((p-np.min(p))/(np.max(p)-np.min(p))*255).astype(int)
        R[R<=20] = 255
        G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
        G[G<=20] = 255
        B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
        B[B<=20] = 255
        for x, fac in enumerate(p):
            x_from = state_prelim[4*x+0] -0.5 * state_prelim[4*x+2]
            y_from = state_prelim[4*x+1] -0.5 * state_prelim[4*x+3]
            x_to = state_prelim[4*x+0] + 0.5 * state_prelim[4*x+2]
            y_to = state_prelim[4*x+1] + 0.5 * state_prelim[4*x+3]
            # NOTE(review): fac runs 0..n-1, so fac-1 is -1 for the first
            # facility and wraps to the last color; R[fac] looks intended —
            # kept as-is since the original indexed with p-1.
            data[int(y_from):int(y_to), int(x_from):int(x_to)] = [R[fac-1], G[fac-1], B[fac-1]]
        return np.array(data, dtype=np.uint8)
    def constructState(self, x, y, b, h, n):
        """Pack per-facility (x, y, b, h) into a flat vector, or an image in rgb mode."""
        state_prelim = np.zeros((4*n,), dtype=float)
        state_prelim[0::4] = x
        state_prelim[1::4] = y
        state_prelim[2::4] = b
        state_prelim[3::4] = h
        if self.mode == "human":
            self.state = np.array(state_prelim)
        elif self.mode == "rgb_array":
            self.state = self.ConvertCoordinatesToState(state_prelim)
        return self.state[:]
    def render(self):
        """Render the current layout as an RGB image via matplotlib."""
        if self.mode == "human":
            data = self.ConvertCoordinatesToState(self.state[:])
            img = Image.fromarray(data, 'RGB')
        if self.mode == "rgb_array":
            img = Image.fromarray(self.state, 'RGB')
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        return img
    def close(self):
        pass #Nothing here yet
class stsEnv(gym.Env):
metadata = {'render.modes': ['rgb_array', 'human']}
def __init__(self, mode = None, instance = None):
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb'))
self.instance = instance
self.mode = mode
self.MHC = rewards.mhc.MHC()
while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']):
print('Available Problem Sets:', self.FlowMatrices.keys())
self.instance = input('Pick a problem:').strip()
self.F = self.FlowMatrices[self.instance]
self.n = self.problems[self.instance]
self.AreaData = self.sizes[self.instance]
# Obtain size data: FBS needs a length and area
self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed
# Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5)
if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys():
self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image
self.W = int(self.LayoutWidths[self.instance])
else:
self.A = np.sum(self.a)
# Design a squared plant layout
self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image
self.W = self.L
'''
Nomenclature:
W --> Width of Plant (y coordinate)
L --> Length of Plant (x coordinate)
w --> Width of facility/bay (x coordinate)
l --> Length of facility/bay (y coordinate)
A --> Area of Plant
a --> Area of facility
Point of origin analoguous to numpy indexing (top left corner of plant)
beta --> aspect ratios (as alpha is reserved for learning rate)
'''
# Provide variables for layout encoding (epsilon in doi:10.1016/j.ejor.2018.01.001)
self.permutation = None
self.slicing = None
self.orientation_space = spaces.Box(low=0, high = 1, shape=(self.n-1,), dtype=np.int) # binary vector indicating bay breaks (i = 1 means last facility in bay)
self.state = None
if self.mode == "rgb_array":
self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation
elif self.mode == "human":
#observation_low = np.tile(np.array([0,0,self.min_side_length, self.min_side_length],dtype=float), self.n)
#observation_high = np.tile(np.array([self.L, self.W, max(self.l), max(self.w)], dtype=float), self.n)
observation_low = np.zeros(4* self.n)
observation_high = np.zeros(4* self.n)
observation_low[0::4] = 0.0 #Top-left corner y
observation_low[1::4] = 0.0 #Top-left corner x
observation_low[2::4] = 1.0 #Width
observation_low[3::4] = 1.0 #Length
observation_high[0::4] = self.W
observation_high[1::4] = self.L
observation_high[2::4] = self.W
observation_high[3::4] = self.L
self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = float) # Vector representation of coordinates
else:
print("Nothing correct selected")
self.action_space = spaces.Discrete(5)
self.actions = {0: 'Permute', 1: 'Slice_Swap', 2: 'Shuffle', 3: 'Bit_Swap', 4: 'Idle'}
def reset(self):
# 1. Get a random permutation, slicing order and orientation
self.permutation, self.slicing, self.orientation = self.sampler()
# 2. Build the tree incl. size information
s = self.TreeBuilder(self.permutation, self.slicing, self.orientation)
centers = np.array([s[0::4] + 0.5*s[2::4], s[1::4] + 0.5* s[3::4]])
self.D = getDistances(centers[0], centers[1])
reward, self.TM = self.MHC.compute(self.D, self.F, np.array(range(1,self.n+1)))
if self.mode == "human":
self.state = np.array(s)
elif self.mode == "rgb_array":
self.state = self.ConvertCoordinatesToState(s)
return self.state
    def ConvertCoordinatesToState(self, s):
        """Rasterize the coordinate vector *s* (y, x, width, length per facility)
        into a (W, L, 3) RGB image of the plant layout.

        R encodes each facility's permutation value, G the row sums of the
        transport matrix, B the column sums; each channel is min-max scaled
        to 0-255.
        """
        data = np.zeros((self.observation_space.shape)) if self.mode == 'rgb_array' else np.zeros((self.W, self.L, 3),dtype=np.uint8)
        # NOTE(review): assumes self.TM was set by a prior MHC.compute() call
        # (e.g. in reset()) — verify render order.
        sources = np.sum(self.TM, axis = 1)
        sinks = np.sum(self.TM, axis = 0)
        p = self.permutation[:]
        # Min-max scale each channel to the 0-255 byte range.
        R = np.array((p-np.min(p))/(np.max(p)-np.min(p))*255).astype(int)
        G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int)
        B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int)
        # Paint each facility's rectangle with its (R, G, B) colour.
        for x in range(self.n):
            y_from = s[4*x+0]
            x_from = s[4*x+1]
            y_to = y_from + s[4*x+2]
            x_to = x_from + s[4*x+3]
            data[int(y_from):int(y_to), int(x_from):int(x_to)] = [R[x], G[x], B[x]]
        return np.array(data, dtype=np.uint8)
    def TreeBuilder(self,p,s,o):
        """Build the slicing tree (STS) for permutation *p*, slicing order *s*
        and orientation bits *o*, and return a flat coordinate vector with
        (upper-left y, upper-left x, width, length) for each facility.
        """
        # Orientation bit 0 -> vertical cut, 1 -> horizontal cut.
        names = {0: 'V', 1: 'H'}
        contains = np.array(p)
        W = self.W
        L = self.L
        area = W * L
        # Root node spans the whole plant and initially contains all facilities.
        self.STS = Node(name = None, contains = contains, parent = None, area = area, width = W, length = L, upper_left = np.zeros((2,)), lower_right = np.array([W,L]), dtype = float)
        for i,r in enumerate(o):
            name = names[r]
            # s[i] is a 1-based position in the permutation; the cut happens
            # right after the facility found there.
            cut_after_pos = s[i]
            whats_in_pos = p[cut_after_pos-1]
            # Find the (leaf) node still containing that facility and split it.
            parent = anytree.search.find(self.STS, lambda node: np.any(node.contains==whats_in_pos))
            parent.name = name
            starting_point = parent.upper_left
            cuts = np.split(parent.contains, [np.where(parent.contains == whats_in_pos)[0][0]+1])
            for c in cuts:
                # Facility labels are 1-based, so c-1 indexes self.a.
                area = float(np.sum(self.a[c-1]))
                # A vertical cut keeps the parent's width and shares its length;
                # a horizontal cut does the opposite.
                length = area/parent.width if name == 'V' else parent.length
                width = area/parent.length if name == 'H' else parent.width
                starting_point = starting_point
                contains = c
                # Leaf nodes (single facility) are named after that facility.
                new_name = None if not len(c)==1 else c[0]
                Node(name = new_name, \
                    contains = contains, \
                    parent = parent, \
                    area = area, \
                    width = width, \
                    length = length, \
                    upper_left = starting_point, \
                    lower_right = starting_point + np.array([width, length]), \
                    dtype = float)
                # Advance the origin for the next sibling along the cut axis.
                starting_point = starting_point + np.array([0, length]) if parent.name == 'V' else starting_point + np.array([width, 0])
            parent.contains = None
        self.STS.root.area = np.sum([i.area for i in self.STS.root.children])
        # Flatten leaves into the 4*n coordinate vector, indexed by facility id.
        s = np.zeros((4*self.n,))
        for l in self.STS.leaves:
            trg = int(l.name)-1
            s[4*trg] = l.upper_left[0]
            s[4*trg+1] = l.upper_left[1]
            s[4*trg+2] = l.width
            s[4*trg+3] = l.length
        return s
def step(self, a):
action = self.actions[a]
'''
Available actions in STS:
- Random permutation change
- Random slicing order change at two positions
- Shuffle slicing order (new random array)
- Bitswap in Orientation vector
- Do Nothing
'''
if action == 'Permute':
i = np.random.randint(0, len(self.permutation)-1)
j = np.random.randint(0, len(self.permutation)-1)
temp_perm = np.array(self.permutation)
temp_perm[i], temp_perm[j] = temp_perm[j], temp_perm[i]
self.permutation = np.array(temp_perm)
elif action == 'Slice_Swap':
i = np.random.randint(0, len(self.slicing)-1)
j = np.random.randint(0, len(self.slicing)-1)
temp_sli = np.array(self.slicing)
temp_sli[i], temp_sli[j] = temp_sli[j], temp_sli[i]
self.slicing = np.array(temp_sli)
elif action == 'Shuffle':
self.slicing = default_rng().choice(range(1,self.n), size=self.n-1, replace=False)
elif action == 'Bit_Swap':
i = np.random.randint(0, len(self.orientation)-1)
if self.orientation[i] == 1:
self.orientation[i] = 0
elif self.orientation[i] == 0:
self.orientation[i] = 1
elif action == 'Idle':
self.permutation = np.array(self.permutation)
self.slicing = np.array(self.slicing)
self.orientation = np.array(self.orientation)
new_state = self.TreeBuilder(self.permutation, self.slicing, self.orientation)
if self.mode == "human":
self.state = np.array(new_state)
elif self.mode == "rgb_array":
self.state = self.ConvertCoordinatesToState(new_state)
return self.state[:], 0, False, {}
def render(self, mode=None):
if self.mode == "human":
data = self.ConvertCoordinatesToState(self.state[:])
img = Image.fromarray(data, 'RGB')
elif self.mode == "rgb_array":
img = Image.fromarray(self.state, 'RGB')
plt.imshow(img)
plt.axis('off')
plt.show()
return img
def sampler(self):
return | default_rng() | numpy.random.default_rng |
import cv2
import numpy as np
def get_center_of_poly(pts):
# try:
# M = cv2.moments(pts)
# except:
M = cv2.moments( | np.array([pts]) | numpy.array |
# fitmultimodel.py - Multi-component distributions SNNLS fit function
# --------------------------------------------------------------------
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import copy
import numpy as np
import matplotlib.pyplot as plt
from types import FunctionType
import deerlab as dl
from deerlab.utils import hccm, goodness_of_fit, Jacobian
from deerlab.classes import FitResult
def fitmultimodel(V, Kmodel, r, model, maxModels, method='aic', lb=None, ub=None, lbK=None, ubK=None,
strategy='split', weights=1, renormalize = True, uq=True, tol=1e-9, maxiter=1e8):
r"""
Fits a multi-model parametric distance distribution model to a dipolar signal using separable
non-linear least-squares (SNLLS).
Parameters
----------
V : array_like or list of array_like
Dipolar signal(s) to be fitted.
Kmodel : callable or 2D-array_like
Dipolar kernel model. If no kernel parameters must be fitted, it can be specified as a matrix array
(or a list thereof if multiple signals are globally fitted).
Otherwise, it is a callable function that accepts an array of kernel parameters and returns
a kernel matrix array or a list thereof.
r : array_like
Distance axis, in nanometers.
model : callable
Basis component of the multi-component distance distribution.
Must be a callable DeerLab model function (e.g. ``dd_gauss`` or ``dd_rice``).
maxModels : scalar
Maximal number of components in the multi-component distance distribution.
method : string, optional
Functional metric used for the selection of the optimal number of components:
* ``'aic'`` Akaike information criterion
* ``'aicc'`` corrected Akaike information criterion
* ``'bic'`` Bayesian information criterion
* ``'rmsd'`` Root-mean squared deviation
The default is ``'aic'``.
lb : array_like, optional
Lower bounds for the distribution basis model parameters. If not specified, parameters are unbounded.
ub : array_like, optional
Upper bounds for the distribution basis model parameters. If not specified, parameters are unbounded.
    lbK : array_like, optional
        Lower bounds for the kernel model parameters. If not specified, parameters are unbounded.
ubK : array_like, optional
Upper bounds for the kernel model parameters. If not specified, parameters are unbounded.
strategy: string, optional
Strategy for the initialization of the multi-component non-linear parameters:
* ``'spread'`` For each N-component model, the non-linear parameters start values are spread equidistantly over the box constraints.
The number of components are changed in a forward matter, i.e. 1,2,...,N.
* ``'split'`` For each N-component model, the non-linear parameters start values are selected by splitting the location and spread of
the components obtained from the N-1 component fit. The number of components are changed in a forward matter, i.e. 1,2,...,N.
* ``'merge'`` For each N-component model, the non-linear parameters start values are selected by merging the location and spread of
the components obtained from the N+1 component fit. The number of components are changed in a backward matter, i.e. N,N-1,...,1.
The default is ``'split'``.
weights : array_like, optional
Array of weighting coefficients for the individual signals in global fitting, the default is all weighted equally.
renormalize : boolean, optional
Enable/disable renormalization of the fitted distribution, by default it is enabled.
uq : boolean, optional
Enable/disable the uncertainty quantification analysis, by default it is enabled.
tol : scalar, optional
Tolerance value for convergence of the NNLS algorithm. If not specified, the value is set to ``tol = 1e-9``.
maxiter: scalar, optional
Maximum number of iterations before termination. If not specified, the value is set to ``maxiter = 1e8``.
Returns
-------
:ref:`FitResult` with the following fields defined:
P : ndarray
Fitted distance distribution with optimal number of components.
Pparam : ndarray
Fitted distance distribution components parameters
amps : ndarray
Fitted components amplitudes
Kparam : ndarray
Fitted kernel parameters.
V : ndarray or list thereof
Fitted dipolar signal(s).
Puncert : :ref:`UQResult`
Covariance-based uncertainty quantification of the fitted distance distribution
paramUncert : :ref:`UQResult`
Covariance-based uncertainty quantification of the fitted parameters
Vuncert : ndarray or list thereof
Covariance-based uncertainty quantification of the fitted dipolar signal(s).
Nopt : int scalar
Optimized number of components in model.
Pn : list of ndarrays
List of all fitted multi-component distance distributions.
selfun : ndarray
Selection functional values (as specified as ``method``) for the all fitted multi-component models.
scale : float int or list of float int
Amplitude scale(s) of the dipolar signal(s).
plot : callable
Function to display the results. It will display the
fitted signals, the distance distribution with confidence intervals,
and the values of the selection functional. The function returns the figure object
(``matplotlib.figure.Figure``) object as output, which can be
modified. Using ``fig = plot(show=False)`` will not render
the figure unless ``display(fig)`` is called.
stats : dict
Goodness of fit statistical estimators:
* ``stats['chi2red']`` - Reduced \chi^2 test
* ``stats['r2']`` - R^2 test
* ``stats['rmsd']`` - Root-mean squared deviation (RMSD)
* ``stats['aic']`` - Akaike information criterion
* ``stats['aicc']`` - Corrected Akaike information criterion
* ``stats['bic']`` - Bayesian information criterion
success : bool
Whether or not the optimizer exited successfully.
cost : float
Value of the cost function at the solution.
residuals : ndarray
Vector of residuals at the solution.
Notes
-----
This function takes advantage of the special structure of a multi-component model, i.e. the separability
of the component amplitudes as linear parameters from the rest of the non-linear parameters. This makes it
suitable to be solved as a SNLLS problem.
Examples
--------
A classical example involves the fit of a multi-Gauss distance distribution to a 4-pulse DEER dipolar signal.
Since the signal requires additional parameters (e.g. modulation depth, background parameters,…) a kernel model
can be defined to account for these::
def Kmodel(Kpar):
# Unpack parameters
lam,conc = Kpar
# Construct kernel
K = dl.dipolarkernel(t,r,mod=lam,bg=dl.bg_hom3d(t,conc,lam))
return K
fit = dl.fitmultimodel(V,Kmodel,r,dd_model,Nmax,'aicc')
    If multiple signals are to be fitted globally the example above can be easily adapted by passing multiple
signals to the fit function and by returning multiple kernels with the kernel model function::
def Kmodel(Kpar):
# Unpack parameters
lam,conc = Kpar
# Construct kernels for both signals
K1 = dl.dipolarkernel(t1,r,mod=lam,bg=bg_hom3d(t1,conc,lam))
K2 = dl.dipolarkernel(t2,r,mod=lam,bg=bg_hom3d(t2,conc,lam))
return K1,K2
fit = dl.fitmultimodel([V1,V2],Kmodel,r,dd_model,Nmax,'aicc')
"""
# Ensure that all arrays are numpy.nparray
r = np.atleast_1d(r)
# Parse multiple datsets and non-linear operators into a single concatenated vector/matrix
V, Kmodel, weights, Vsubsets, prescales = dl.utils.parse_multidatasets(V, Kmodel, weights,precondition=True)
# Check kernel model
if type(Kmodel) is FunctionType:
# If callable, determine how many parameters the model requires
nKparam = 0
notEnoughParam = True
while notEnoughParam:
nKparam = nKparam + 1
try:
Kmodel(np.random.uniform(size=nKparam))
notEnoughParam = False
except ValueError:
notEnoughParam = True
else:
# If the kernel is just a matrix make it a callable without parameters
nKparam = 0
K = copy.deepcopy(Kmodel) # need a copy to avoid infite recursion on next step
Kmodel = lambda _: K
# Extract information about the model
nparam = len(model.start)
if lb is None:
lb = model.lower
if ub is None:
ub = model.upper
paramnames = model.parameters
if lbK is None:
lbK = []
if ubK is None:
ubK = []
# Ensure that all arrays are numpy.nparray
lb,ub,lbK,ubK = np.atleast_1d(lb,ub,lbK,ubK)
if len(lbK) is not nKparam or len(ubK) is not nKparam:
raise ValueError('The upper/lower bounds of the kernel parameters must be ',nKparam,'-element arrays')
areLocations = [str in ['Mean','Location'] for str in paramnames]
areSpreads = [str in ['Spread','Width','Standard deviation'] for str in paramnames]
if any(areLocations):
# If the center of the basis function is a parameter limit it
# to the distance axis range (stabilizes parameter search)
ub[areLocations] = max(r)
lb[areLocations] = min(r)
def nonlinmodel(par,Nmodels):
#===============================================================================
"""
Non-linear augmented kernel model
----------------------------------
This function constructs the actual non-linear function which is
passed to the SNLLS problem. The full signal is obtained by multiplication
of this matrix by a vector of amplitudes.
"""
# Get kernel with current non-linear parameters
K = Kmodel(par[ | np.arange(0,nKparam) | numpy.arange |
import os
import dipy.reconst.dti as dti
import numpy as np
from dipy.core.gradients import gradient_table, generate_bvecs
from dipy.io.image import save_nifti
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
from dipy.reconst.csdeconv import auto_response
from scipy.spatial.transform import Rotation
class Ellipse:
    """Quadratic-form ellipsoid defined by a mean and a covariance matrix."""

    def __init__(self, mean, covariance):
        self.covariance = covariance
        # Pseudo-inverse tolerates singular covariance matrices.
        self.inv_covariance = np.linalg.pinv(self.covariance)
        self.mean = mean

    def quad_form_at(self, x):
        """Return (x - mean)^T C (x - mean) using this ellipse's covariance C."""
        offset = x - self.mean
        return offset @ self.covariance @ offset

    def exp_quad_at(self, x):
        """Gaussian-like intensity exp(-quad_form_at(x)) at point *x*."""
        return np.exp(-self.quad_form_at(x))

    def rotate_covariance(self, angles):
        """Rotate covariance and its pseudo-inverse by rotation vector *angles*
        (radians)."""
        rot = Rotation.from_rotvec(angles).as_matrix()
        self.covariance = rot @ self.covariance @ rot.T
        self.inv_covariance = rot @ self.inv_covariance @ rot.T
class DiffusionImageModel:
def __init__(self, img_dims, intensity_threshold=1.e-3, rotate=False):
center = np.array(img_dims) / 2
cov = (1 / 5) * np.array([[1, 0, 0], [0, 0.5, 0], [0, 0, 0.01]])
ellipse = Ellipse(mean=center, covariance=cov)
if rotate is True:
angles = [np.pi / 3, np.pi / 4, np.pi / 5]
ellipse.rotate_covariance(angles)
self.ellipse = ellipse
self.img_dims = img_dims
self.intensity_threshold = intensity_threshold
self.affine = np.eye(4)
self.image = None
self.b0_image = None
self.bvals = None
self.bvecs = None
self.volumes = None
self.dti = None
self.v1 = None
self.odf = None
def fit_dti(self):
covariance = [
[1., 0., 0.],
[0., 0.2, 0.],
[0., 0., 0.2]
]
covariance = 100 * | np.array(covariance) | numpy.array |
"""""""""
Written by <NAME> at Yu Lab, University of Chicago
"""""""""
from scipy.signal import sosfiltfilt
import numpy as np
# TESTED
def generate_matrix(regr_buffer_size):
    """Build the design matrix for a first-order least-squares fit.

    Column 0 holds the sample index 0..n-1, column 1 a constant intercept
    term; shape is (regr_buffer_size, 2), dtype float.
    """
    indices = np.arange(regr_buffer_size)
    return np.column_stack([indices, np.ones_like(indices, dtype=float)])
# TESTED
def calculate_derv(A, filter, Detector):
curr_filtered = sosfiltfilt(filter, Detector.data_buffer)
curr_regr = curr_filtered[len(curr_filtered) - Detector.regr_buffer_size:, np.newaxis]
pinv = | np.linalg.pinv(A) | numpy.linalg.pinv |
import typing
from pathlib import Path
import numpy as np
import scipy.fftpack
from scipy import signal
import einops
import functools
import torch
import paderbox as pb
import padertorch as pt
import padercontrib as pc
from padertorch.contrib.cb.transform import stft as pt_stft, istft as pt_istft
from padertorch.contrib.cb import complex as pt_complex
def kaldi_stft(time_signal, size=512, shift=160, *, axis=-1, window_length=400, pad=False, fading=False):
    """STFT wrapper with Kaldi-style defaults.

    NOTE(review): defaults look like Kaldi's 25 ms window / 10 ms shift at
    16 kHz — confirm. Forwards every argument explicitly instead of the
    fragile ``**locals()`` idiom.
    """
    # ToDo: window
    return pb.transform.stft(
        time_signal=time_signal, size=size, shift=shift, axis=axis,
        window_length=window_length, pad=pad, fading=fading)
def kaldi_istft(stft_signal, size=512, shift=160, *, axis=-1, window_length=400, pad=False, fading=False):
    """Inverse STFT counterpart of :func:`kaldi_stft` (same parameter set).

    Forwards every argument explicitly instead of ``**locals()``.
    """
    # ToDo: window
    return pb.transform.istft(
        stft_signal=stft_signal, size=size, shift=shift, axis=axis,
        window_length=window_length, pad=pad, fading=fading)
def stft_to_cepstrum(
stft_signal,
norm='ortho',
eps=None,
):
"""
Reference implementation to get the cepstrum: dft -> abs -> log -> dft
>>> signal1 = np.array([1, 2, 3, 4])
>>> signal2 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
>>> dft_signal = np.fft.fft(signal1)
>>> np.fft.fft(np.log(np.abs(dft_signal)), norm='ortho')
array([2.53758691+0.j, 0.80471896+0.j, 0.45814537+0.j, 0.80471896+0.j])
>>> dft_signal = np.fft.fft(signal2)
>>> np.fft.fft(np.log(np.abs(dft_signal)), norm='ortho')
array([5.67812692+0.j, 1.21752299+0.j, 0.53177166+0.j, 0.33614941+0.j,
0.28670713+0.j, 0.33614941+0.j, 0.53177166+0.j, 1.21752299+0.j])
Implementation without redundant operations: rdft -> abs -> log -> dct
>>> rdft_signal = np.fft.rfft(signal1)
>>> stft_to_cepstrum(rdft_signal)
array([2.53758691, 0.80471896, 0.45814537])
>>> rdft_signal = np.fft.rfft(signal2)
>>> stft_to_cepstrum(rdft_signal)
array([5.67812692, 1.21752299, 0.53177166, 0.33614941, 0.28670713])
Note: a scaling only influences the first value
>>> rdft_signal = np.fft.rfft(signal1)
>>> stft_to_cepstrum(rdft_signal * 2)
array([3.92388127, 0.80471896, 0.45814537])
>>> stft_to_cepstrum([0., 0, 0])
array([-1416.79283706, 0. , 0. ])
>>> stft_to_cepstrum([0., 0, 0], eps=0)
array([-inf, nan, nan])
>>> stft_to_cepstrum([1., 1, 1])
array([0., 0., 0.])
>>> stft_to_cepstrum([1., 1, 1], eps=0)
array([0., 0., 0.])
>>> stft_to_cepstrum([0., 1, 1])
array([-354.19820927, -354.19820927, -354.19820927])
>>> stft_to_cepstrum([0., 1, 1], eps=0)
array([-inf, -inf, -inf])
"""
stft_signal = np.asarray(stft_signal)
assert stft_signal.shape[-1] % 2 == 1, stft_signal.shape
stft_signal = np.abs(stft_signal)
if eps is None:
eps = np.finfo(stft_signal.dtype).tiny
if eps != 0:
stft_signal = | np.maximum(stft_signal, eps) | numpy.maximum |
# pylint: disable=redefined-outer-name, no-member
from copy import deepcopy
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from scipy.stats import linregress
from xarray import DataArray, Dataset
from ...data import concat, convert_to_inference_data, from_dict, load_arviz_data
from ...rcparams import rcParams
from ...stats import (
apply_test_function,
compare,
ess,
hdi,
loo,
loo_pit,
psislw,
r2_score,
summary,
waic,
)
from ...stats.stats import _gpinv
from ...stats.stats_utils import get_log_likelihood
from ..helpers import check_multiple_attrs, multidim_models # pylint: disable=unused-import
# Load example datasets eagerly so fixtures hold fully materialized data.
rcParams["data.load"] = "eager"
@pytest.fixture(scope="session")
def centered_eight():
    """Eight-schools example (centered parameterization), shared per session."""
    centered_eight = load_arviz_data("centered_eight")
    return centered_eight
@pytest.fixture(scope="session")
def non_centered_eight():
    """Eight-schools example (non-centered parameterization), shared per session."""
    non_centered_eight = load_arviz_data("non_centered_eight")
    return non_centered_eight
def test_hdp():
    """The default (94%) HDI of a standard normal is roughly ±1.88."""
    draws = np.random.randn(5000000)
    assert_array_almost_equal(hdi(draws), [-1.88, 1.88], 2)
def test_hdp_2darray():
    """A (draws, vars) input yields one (lo, hi) interval per variable."""
    draws = np.random.randn(12000, 5)
    assert hdi(draws).shape == (5, 2)
def test_hdi_multidimension():
    """Chain and draw axes are reduced, leaving one interval per variable."""
    draws = np.random.randn(12000, 10, 3)
    assert hdi(draws).shape == (3, 2)
def test_hdi_idata(centered_eight):
    """hdi on a Dataset keeps variable structure; input_core_dims picks the reduced dim."""
    data = centered_eight.posterior
    result = hdi(data)
    assert isinstance(result, Dataset)
    assert dict(result.dims) == {"school": 8, "hdi": 2}
    result = hdi(data, input_core_dims=[["chain"]])
    assert isinstance(result, Dataset)
    assert result.dims == {"draw": 500, "hdi": 2, "school": 8}
def test_hdi_idata_varnames(centered_eight):
    """var_names restricts the output to the requested variables, in order."""
    data = centered_eight.posterior
    result = hdi(data, var_names=["mu", "theta"])
    assert isinstance(result, Dataset)
    assert result.dims == {"hdi": 2, "school": 8}
    assert list(result.data_vars.keys()) == ["mu", "theta"]
def test_hdi_idata_group(centered_eight):
    """Posterior HDI of mu is narrower than the prior HDI."""
    result_posterior = hdi(centered_eight, group="posterior", var_names="mu")
    result_prior = hdi(centered_eight, group="prior", var_names="mu")
    assert result_prior.dims == {"hdi": 2}
    range_posterior = result_posterior.mu.values[1] - result_posterior.mu.values[0]
    range_prior = result_prior.mu.values[1] - result_prior.mu.values[0]
    assert range_posterior < range_prior
def test_hdi_coords(centered_eight):
    """coords subsets the kept dimension before computing the HDI."""
    data = centered_eight.posterior
    result = hdi(data, coords={"chain": [0, 1, 3]}, input_core_dims=[["draw"]])
    assert_array_equal(result.coords["chain"], [0, 1, 3])
def test_hdi_multimodal():
    """multimodal=True returns one interval per mode of a bimodal sample."""
    normal_sample = np.concatenate(
        (np.random.normal(-4, 1, 2500000), np.random.normal(2, 0.5, 2500000))
    )
    intervals = hdi(normal_sample, multimodal=True)
    assert_array_almost_equal(intervals, [[-5.8, -2.2], [0.9, 3.1]], 1)
def test_hdi_circular():
    """circular=True wraps around the circle (interval endpoints may cross pi)."""
    normal_sample = np.random.vonmises(np.pi, 1, 5000000)
    interval = hdi(normal_sample, circular=True)
    assert_array_almost_equal(interval, [0.6, -0.6], 1)
def test_hdi_bad_ci():
    """hdi_prob outside (0, 1) is rejected."""
    normal_sample = np.random.randn(10)
    with pytest.raises(ValueError):
        hdi(normal_sample, hdi_prob=2)
def test_hdi_skipna():
    """skipna=True ignores NaNs, matching the HDI of the clean subset."""
    normal_sample = np.random.randn(500)
    interval = hdi(normal_sample[10:])
    normal_sample[:10] = np.nan
    interval_ = hdi(normal_sample, skipna=True)
    assert_array_almost_equal(interval, interval_)
def test_r2_score():
    """r2_score matches the squared Pearson r of an OLS fit."""
    x = np.linspace(0, 1, 100)
    y = np.random.normal(x, 1)
    fit = linregress(x, y)
    predicted = fit.intercept + fit.slope * x
    assert_allclose(fit.rvalue ** 2, r2_score(y, predicted).r2, 2)
def test_r2_score_multivariate():
    """r2_score stays finite for stacked multivariate observations."""
    x = np.linspace(0, 1, 100)
    y = np.random.normal(x, 1)
    fit = linregress(x, y)
    predicted = fit.intercept + fit.slope * x
    stacked_y = np.c_[y, y]
    stacked_pred = np.c_[predicted, predicted]
    assert not np.isnan(r2_score(stacked_y, stacked_pred).r2)
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("multidim", [True, False])
def test_compare_same(centered_eight, multidim_models, method, multidim):
    """Identical models get equal weights, and weights sum to 1."""
    if multidim:
        data_dict = {"first": multidim_models.model_1, "second": multidim_models.model_1}
    else:
        data_dict = {"first": centered_eight, "second": centered_eight}
    weight = compare(data_dict, method=method)["weight"]
    assert_allclose(weight[0], weight[1])
    assert_allclose(np.sum(weight), 1.0)
def test_compare_unknown_ic_and_method(centered_eight, non_centered_eight):
    """Unknown ic raises NotImplementedError; unknown method raises ValueError."""
    model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
    with pytest.raises(NotImplementedError):
        compare(model_dict, ic="Unknown", method="stacking")
    with pytest.raises(ValueError):
        compare(model_dict, ic="loo", method="Unknown")
@pytest.mark.parametrize("ic", ["loo", "waic"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
def test_compare_different(centered_eight, non_centered_eight, ic, method, scale):
    """The better (non-centered) parameterization never gets less weight."""
    model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
    weight = compare(model_dict, ic=ic, method=method, scale=scale)["weight"]
    assert weight["non_centered"] >= weight["centered"]
    assert_allclose(np.sum(weight), 1.0)
@pytest.mark.parametrize("ic", ["loo", "waic"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
def test_compare_different_multidim(multidim_models, ic, method):
    """Multidimensional models can be compared; ordering is deterministic."""
    model_dict = {"model_1": multidim_models.model_1, "model_2": multidim_models.model_2}
    weight = compare(model_dict, ic=ic, method=method)["weight"]
    # this should hold because the same seed is always used
    assert weight["model_1"] >= weight["model_2"]
    assert_allclose(np.sum(weight), 1.0)
def test_compare_different_size(centered_eight, non_centered_eight):
    """Comparing models with mismatched observation counts is rejected."""
    centered_eight = deepcopy(centered_eight)
    centered_eight.posterior = centered_eight.posterior.drop("Choate", "school")
    centered_eight.sample_stats = centered_eight.sample_stats.drop("Choate", "school")
    centered_eight.posterior_predictive = centered_eight.posterior_predictive.drop(
        "Choate", "school"
    )
    centered_eight.prior = centered_eight.prior.drop("Choate", "school")
    centered_eight.observed_data = centered_eight.observed_data.drop("Choate", "school")
    model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
    with pytest.raises(ValueError):
        compare(model_dict, ic="waic", method="stacking")
@pytest.mark.parametrize("var_names_expected", ((None, 10), ("mu", 1), (["mu", "tau"], 2)))
def test_summary_var_names(centered_eight, var_names_expected):
    """var_names controls how many rows the summary table has."""
    var_names, expected = var_names_expected
    summary_df = summary(centered_eight, var_names=var_names)
    assert len(summary_df.index) == expected
# Expected summary columns: the first four are statistics, the rest diagnostics.
METRICS_NAMES = [
    "mean",
    "sd",
    "hdi_3%",
    "hdi_97%",
    "mcse_mean",
    "mcse_sd",
    "ess_mean",
    "ess_sd",
    "ess_bulk",
    "ess_tail",
    "r_hat",
]
@pytest.mark.parametrize(
    "params",
    (("all", METRICS_NAMES), ("stats", METRICS_NAMES[:4]), ("diagnostics", METRICS_NAMES[4:])),
)
def test_summary_kind(centered_eight, params):
    """kind selects the statistics, diagnostics, or full column set."""
    kind, metrics_names_ = params
    summary_df = summary(centered_eight, kind=kind)
    assert_array_equal(summary_df.columns, metrics_names_)
@pytest.mark.parametrize("fmt", ["wide", "long", "xarray"])
def test_summary_fmt(centered_eight, fmt):
    """Every supported fmt produces a non-None result."""
    assert summary(centered_eight, fmt=fmt) is not None
@pytest.mark.parametrize("order", ["C", "F"])
def test_summary_unpack_order(order):
    """order='C'/'F' controls row-major vs column-major flattening of the index."""
    data = from_dict({"a": np.random.randn(4, 100, 4, 5, 3)})
    az_summary = summary(data, order=order, fmt="wide")
    assert az_summary is not None
    if order != "F":
        first_index = 4
        second_index = 5
        third_index = 3
    else:
        first_index = 3
        second_index = 5
        third_index = 4
    # Rebuild the expected "a[i,j,k]" labels in the requested memory order.
    column_order = []
    for idx1 in range(first_index):
        for idx2 in range(second_index):
            for idx3 in range(third_index):
                if order != "F":
                    column_order.append("a[{},{},{}]".format(idx1, idx2, idx3))
                else:
                    column_order.append("a[{},{},{}]".format(idx3, idx2, idx1))
    for col1, col2 in zip(list(az_summary.index), column_order):
        assert col1 == col2
@pytest.mark.parametrize("origin", [0, 1, 2, 3])
def test_summary_index_origin(origin):
    """index_origin offsets the bracketed indices in row labels."""
    data = from_dict({"a": np.random.randn(2, 50, 10)})
    az_summary = summary(data, index_origin=origin, fmt="wide")
    assert az_summary is not None
    for i, col in enumerate(list(az_summary.index)):
        assert col == "a[{}]".format(i + origin)
@pytest.mark.parametrize(
    "stat_funcs", [[np.var], {"var": np.var, "var2": lambda x: np.var(x) ** 2}]
)
def test_summary_stat_func(centered_eight, stat_funcs):
    """Custom stat_funcs (list or dict form) add columns to the summary."""
    arviz_summary = summary(centered_eight, stat_funcs=stat_funcs)
    assert arviz_summary is not None
    assert hasattr(arviz_summary, "var")
def test_summary_nan(centered_eight):
    """All-NaN variables yield all-NaN rows without poisoning other rows."""
    centered_eight = deepcopy(centered_eight)
    centered_eight.posterior.theta[:, :, 0] = np.nan
    summary_xarray = summary(centered_eight)
    assert summary_xarray is not None
    assert summary_xarray.loc["theta[0]"].isnull().all()
    assert (
        summary_xarray.loc[[ix for ix in summary_xarray.index if ix != "theta[0]"]]
        .notnull()
        .all()
        .all()
    )
def test_summary_skip_nan(centered_eight):
    """Partial NaNs: basic stats are computed, diagnostics stay NaN."""
    centered_eight = deepcopy(centered_eight)
    centered_eight.posterior.theta[:, :10, 1] = np.nan
    summary_xarray = summary(centered_eight)
    theta_1 = summary_xarray.loc["theta[1]"].isnull()
    assert summary_xarray is not None
    assert ~theta_1[:4].all()
    assert theta_1[4:].all()
@pytest.mark.parametrize("fmt", [1, "bad_fmt"])
def test_summary_bad_fmt(centered_eight, fmt):
    """Invalid fmt values raise TypeError."""
    with pytest.raises(TypeError):
        summary(centered_eight, fmt=fmt)
@pytest.mark.parametrize("order", [1, "bad_order"])
def test_summary_bad_unpack_order(centered_eight, order):
    """Invalid order values raise TypeError."""
    with pytest.raises(TypeError):
        summary(centered_eight, order=order)
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("multidim", (True, False))
def test_waic(centered_eight, multidim_models, scale, multidim):
    """Test widely available information criterion calculation"""
    if multidim:
        assert waic(multidim_models.model_1, scale=scale) is not None
        waic_pointwise = waic(multidim_models.model_1, pointwise=True, scale=scale)
    else:
        assert waic(centered_eight, scale=scale) is not None
        waic_pointwise = waic(centered_eight, pointwise=True, scale=scale)
    assert waic_pointwise is not None
    assert "waic_i" in waic_pointwise
def test_waic_bad(centered_eight):
    """Test widely available information criterion calculation"""
    centered_eight = deepcopy(centered_eight)
    # Missing log_likelihood (and then the whole sample_stats group) -> TypeError.
    del centered_eight.sample_stats["log_likelihood"]
    with pytest.raises(TypeError):
        waic(centered_eight)
    del centered_eight.sample_stats
    with pytest.raises(TypeError):
        waic(centered_eight)
def test_waic_bad_scale(centered_eight):
    """Test widely available information criterion calculation with bad scale."""
    with pytest.raises(TypeError):
        waic(centered_eight, scale="bad_value")
def test_waic_warning(centered_eight):
    """Unreliable pointwise WAIC values should trigger a UserWarning."""
    centered_eight = deepcopy(centered_eight)
    centered_eight.sample_stats["log_likelihood"][:, :250, 1] = 10
    with pytest.warns(UserWarning):
        assert waic(centered_eight, pointwise=True) is not None
    # this should throw a warning, but due to numerical issues it fails
    centered_eight.sample_stats["log_likelihood"][:, :, :] = 0
    with pytest.warns(UserWarning):
        assert waic(centered_eight, pointwise=True) is not None
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
def test_waic_print(centered_eight, scale):
    """repr is identical whether or not pointwise values were requested."""
    waic_data = waic(centered_eight, scale=scale).__repr__()
    waic_pointwise = waic(centered_eight, scale=scale, pointwise=True).__repr__()
    assert waic_data is not None
    assert waic_pointwise is not None
    assert waic_data == waic_pointwise
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
@pytest.mark.parametrize("multidim", (True, False))
def test_loo(centered_eight, multidim_models, scale, multidim):
"""Test approximate leave one out criterion calculation"""
if multidim:
assert loo(multidim_models.model_1, scale=scale) is not None
loo_pointwise = loo(multidim_models.model_1, pointwise=True, scale=scale)
else:
assert loo(centered_eight, scale=scale) is not None
loo_pointwise = loo(centered_eight, pointwise=True, scale=scale)
assert loo_pointwise is not None
assert "loo_i" in loo_pointwise
assert "pareto_k" in loo_pointwise
assert "loo_scale" in loo_pointwise
def test_loo_one_chain(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior = centered_eight.posterior.drop([1, 2, 3], "chain")
centered_eight.sample_stats = centered_eight.sample_stats.drop([1, 2, 3], "chain")
assert loo(centered_eight) is not None
def test_loo_bad(centered_eight):
with pytest.raises(TypeError):
loo(np.random.randn(2, 10))
centered_eight = deepcopy(centered_eight)
del centered_eight.sample_stats["log_likelihood"]
with pytest.raises(TypeError):
loo(centered_eight)
def test_loo_bad_scale(centered_eight):
"""Test loo with bad scale value."""
with pytest.raises(TypeError):
loo(centered_eight, scale="bad_scale")
def test_loo_bad_no_posterior_reff(centered_eight):
    """reff=None needs a posterior group; an explicit reff does not."""
    loo(centered_eight, reff=None)
    idata = deepcopy(centered_eight)
    del idata.posterior
    with pytest.raises(TypeError):
        loo(idata, reff=None)
    # with reff given explicitly the posterior is not needed
    loo(idata, reff=0.7)
def test_loo_warning(centered_eight):
    """Extreme log-likelihood values must trigger shape-parameter warnings."""
    idata = deepcopy(centered_eight)
    # corrupt a single observation so its khat becomes infinite
    idata.sample_stats["log_likelihood"][:, :, 1] = 10
    with pytest.warns(UserWarning) as records:
        assert loo(idata, pointwise=True) is not None
    assert any("Estimated shape parameter" in str(rec.message) for rec in records)
    # corrupt every observation so all of the khats become infinite
    idata.sample_stats["log_likelihood"][:, :, :] = 1
    with pytest.warns(UserWarning) as records:
        assert loo(idata, pointwise=True) is not None
    assert any("Estimated shape parameter" in str(rec.message) for rec in records)
@pytest.mark.parametrize("scale", ["log", "negative_log", "deviance"])
def test_loo_print(centered_eight, scale):
    """The pointwise repr must extend (be longer than) the summary repr."""
    summary_repr = repr(loo(centered_eight, scale=scale))
    pointwise_repr = repr(loo(centered_eight, scale=scale, pointwise=True))
    assert summary_repr is not None
    assert pointwise_repr is not None
    assert len(summary_repr) < len(pointwise_repr)
def test_psislw(centered_eight):
    """pareto_k from loo must match the tail indices computed by psislw."""
    pareto_k = loo(centered_eight, pointwise=True, reff=0.7)["pareto_k"]
    stacked_ll = get_log_likelihood(centered_eight).stack(sample=("chain", "draw"))
    assert_allclose(pareto_k, psislw(-stacked_ll, 0.7)[1])
@pytest.mark.parametrize("probs", [True, False])
@pytest.mark.parametrize("kappa", [-1, -0.5, 1e-30, 0.5, 1])
@pytest.mark.parametrize("sigma", [0, 2])
def test_gpinv(probs, kappa, sigma):
    """_gpinv must preserve the length of its probability vector."""
    # the False case includes an out-of-range probability on purpose
    quantiles = (
        np.array([0.1, 0.1, 0.1, 0.2, 0.3])
        if probs
        else np.array([-0.1, 0.1, 0.1, 0.2, 0.3])
    )
    assert len(_gpinv(quantiles, kappa, sigma)) == len(quantiles)
@pytest.mark.parametrize("func", [loo, waic])
def test_multidimensional_log_likelihood(func):
    """A multidim log_likelihood must give the same result as its flat reshape."""
    ll_multi = np.random.rand(4, 23, 15, 2)
    ll_flat = ll_multi.reshape(4, 23, 15 * 2)
    stats_multi = Dataset(dict(log_likelihood=DataArray(ll_multi, dims=["chain", "draw", "a", "b"])))
    stats_flat = Dataset(dict(log_likelihood=DataArray(ll_flat, dims=["chain", "draw", "v"])))
    post = Dataset(dict(mu=DataArray(np.random.rand(4, 23, 2), dims=["chain", "draw", "v"])))
    posterior_idata = convert_to_inference_data(post, group="posterior")
    idata_multi = concat(posterior_idata, convert_to_inference_data(stats_multi, group="sample_stats"))
    idata_flat = concat(posterior_idata, convert_to_inference_data(stats_flat, group="sample_stats"))
    result_multi = func(idata_multi)
    result_flat = func(idata_flat)
    assert (result_flat == result_multi).all()
    assert_array_almost_equal(result_multi[:4], result_flat[:4])
@pytest.mark.parametrize(
    "args",
    [
        {"y": "obs"},
        {"y": "obs", "y_hat": "obs"},
        {"y": "arr", "y_hat": "obs"},
        {"y": "obs", "y_hat": "arr"},
        {"y": "arr", "y_hat": "arr"},
        {"y": "obs", "y_hat": "obs", "log_weights": "arr"},
        {"y": "arr", "y_hat": "obs", "log_weights": "arr"},
        {"y": "obs", "y_hat": "arr", "log_weights": "arr"},
        {"idata": False},
    ],
)
def test_loo_pit(centered_eight, args):
    """loo_pit must return probabilities in [0, 1] for every input combination."""
    y_arr = centered_eight.observed_data.obs
    y_hat_arr = centered_eight.posterior_predictive.obs.stack(sample=("chain", "draw"))
    log_like = get_log_likelihood(centered_eight).stack(sample=("chain", "draw"))
    n_samples = len(log_like.sample)
    ess_p = ess(centered_eight.posterior, method="mean")
    reff = np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples
    log_weights_arr = psislw(-log_like, reff=reff)[0]
    if args.get("idata", True):
        # "arr" placeholders are swapped for the precomputed arrays,
        # everything else ("obs" or absent -> None) is passed through
        arrays = {"y": y_arr, "y_hat": y_hat_arr, "log_weights": log_weights_arr}
        kwargs = {
            name: arrays[name] if args.get(name) == "arr" else args.get(name)
            for name in ("y", "y_hat", "log_weights")
        }
        loo_pit_values = loo_pit(idata=centered_eight, **kwargs)
    else:
        loo_pit_values = loo_pit(idata=None, y=y_arr, y_hat=y_hat_arr, log_weights=log_weights_arr)
    assert np.all((loo_pit_values >= 0) & (loo_pit_values <= 1))
@pytest.mark.parametrize(
    "args",
    [
        {"y": "y"},
        {"y": "y", "y_hat": "y"},
        {"y": "arr", "y_hat": "y"},
        {"y": "y", "y_hat": "arr"},
        {"y": "arr", "y_hat": "arr"},
        {"y": "y", "y_hat": "y", "log_weights": "arr"},
        {"y": "arr", "y_hat": "y", "log_weights": "arr"},
        {"y": "y", "y_hat": "arr", "log_weights": "arr"},
        {"idata": False},
    ],
)
def test_loo_pit_multidim(multidim_models, args):
    """loo_pit must stay inside [0, 1] for multidimensional models too."""
    idata = multidim_models.model_1
    y_arr = idata.observed_data.y
    y_hat_arr = idata.posterior_predictive.y.stack(sample=("chain", "draw"))
    log_like = get_log_likelihood(idata).stack(sample=("chain", "draw"))
    n_samples = len(log_like.sample)
    ess_p = ess(idata.posterior, method="mean")
    reff = np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples
    log_weights_arr = psislw(-log_like, reff=reff)[0]
    if args.get("idata", True):
        # swap "arr" placeholders for the precomputed arrays, pass others through
        arrays = {"y": y_arr, "y_hat": y_hat_arr, "log_weights": log_weights_arr}
        kwargs = {
            name: arrays[name] if args.get(name) == "arr" else args.get(name)
            for name in ("y", "y_hat", "log_weights")
        }
        loo_pit_values = loo_pit(idata=idata, **kwargs)
    else:
        loo_pit_values = loo_pit(idata=None, y=y_arr, y_hat=y_hat_arr, log_weights=log_weights_arr)
    assert np.all((loo_pit_values >= 0) & (loo_pit_values <= 1))
@pytest.mark.parametrize("input_type", ["idataarray", "idatanone_ystr", "yarr_yhatnone"])
def test_loo_pit_bad_input(centered_eight, input_type):
    """Test incompatible input combinations."""
    arr = np.random.random((8, 200))
    # each case maps to the kwargs that should fail and the expected message
    cases = {
        "idataarray": (dict(idata=arr, y="obs"), r"type InferenceData or None"),
        "idatanone_ystr": (dict(idata=None, y="obs"), r"all 3.+must be array or DataArray"),
        "yarr_yhatnone": (dict(idata=centered_eight, y=arr, y_hat=None), r"y_hat.+None.+y.+str"),
    }
    kwargs, pattern = cases[input_type]
    with pytest.raises(ValueError, match=pattern):
        loo_pit(**kwargs)
@pytest.mark.parametrize("arg", ["y", "y_hat", "log_weights"])
def test_loo_pit_bad_input_type(centered_eight, arg):
    """Each array argument must reject values that are neither None, str nor DataArray."""
    kwargs = dict(y="obs", y_hat="obs", log_weights=None)
    # replace one argument with an int, which is not an accepted type
    kwargs[arg] = 2
    with pytest.raises(ValueError, match="not {}".format(type(2))):
        loo_pit(idata=centered_eight, **kwargs)
@pytest.mark.parametrize("incompatibility", ["y-y_hat1", "y-y_hat2", "y_hat-log_weights"])
def test_loo_pit_bad_input_shape(incompatibility):
    """Test shape incompatibilities."""
    y = np.random.random(8)
    y_hat = np.random.random((8, 200))
    log_weights = np.random.random((8, 200))
    # map each incompatibility to the mangled kwargs and the expected message
    cases = {
        "y-y_hat1": (dict(y=y, y_hat=y_hat[None, :], log_weights=log_weights), "1 more dimension"),
        "y-y_hat2": (dict(y=y, y_hat=y_hat[1:3, :], log_weights=log_weights), "y has shape"),
        "y_hat-log_weights": (dict(y=y, y_hat=y_hat[:, :100], log_weights=log_weights), "must have the same shape"),
    }
    kwargs, pattern = cases[incompatibility]
    with pytest.raises(ValueError, match=pattern):
        loo_pit(**kwargs)
@pytest.mark.parametrize("pointwise", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"kwargs",
[
{},
{"group": "posterior_predictive", "var_names": {"posterior_predictive": "obs"}},
{"group": "observed_data", "var_names": {"both": "obs"}, "out_data_shape": "shape"},
{"var_names": {"both": "obs", "posterior": ["theta", "mu"]}},
{"group": "observed_data", "out_name_data": "T_name"},
],
)
def test_apply_test_function(centered_eight, pointwise, inplace, kwargs):
"""Test some usual call cases of apply_test_function"""
centered_eight = deepcopy(centered_eight)
group = kwargs.get("group", "both")
var_names = kwargs.get("var_names", None)
out_data_shape = kwargs.get("out_data_shape", None)
out_pp_shape = kwargs.get("out_pp_shape", None)
out_name_data = kwargs.get("out_name_data", "T")
if out_data_shape == "shape":
out_data_shape = (8,) if pointwise else ()
if out_pp_shape == "shape":
out_pp_shape = (4, 500, 8) if pointwise else (4, 500)
idata = deepcopy(centered_eight)
idata_out = apply_test_function(
idata,
lambda y, theta: | np.mean(y) | numpy.mean |
# this tells python to act as if though We are one folder up
import sys
sys.path.insert(0,'..')
import pandas as pd
import FixedEffectModelPyHDFE.api as FEM
from FixedEffectModelPyHDFE.DemeanDataframe import get_np_columns
#import FixedEffectModel.api as FEM
import numpy as np
from patsy import dmatrices
import statsmodels.formula.api as smf
import statsmodels.api as sm
from fastreg import linear
from datetime import datetime
import unittest
from math import isclose
# Paths to the Stata test datasets (relative to this test directory).
NLS_WORK = "./../data/test_dropped_na.dta"
CEREAL = "./../data/cereal.dta"
AUTO = "./../data/auto_drop_na.dta"
# Absolute tolerance used when comparing against the Stata reference values.
TOLERANCE = 0.01
class FixedEffectsModelTestsVSfastreg(unittest.TestCase):
    def setup(self, data_directory, target, regressors, absorb, cluster):
        """Fit the same specification with FixedEffectModelPyHDFE and fastreg.

        Loads the Stata dataset at ``data_directory``, runs
        ``FEM.ols_high_d_category`` (result stored on ``self.result`` for the
        calling test's assertions) and then runs the equivalent fastreg model,
        printing both results and their wall-clock timings for manual
        comparison.
        """
        print(self._testMethodName)
        print("target: ", target)
        print("regressors: ", regressors)
        print("absorb: ", absorb)
        print("cluster: ", cluster)
        df = pd.read_stata(data_directory)
        df.reset_index(drop=True, inplace=True)
        fem_start = datetime.now()
        # keep the fitted result so the individual tests can check
        # fvalue / bse / tvalues against Stata reference numbers
        self.result = FEM.ols_high_d_category(df,
                                              regressors,
                                              target,
                                              absorb,
                                              cluster,
                                              formula=None,
                                              robust=False,
                                              epsilon = 1e-8,
                                              max_iter = 1e6)
        fem_end = datetime.now()
        print("FEM time taken: " + str(fem_end-fem_start))
        self.result.summary()
        print()
        # '0' is this suite's sentinel for "absorb nothing"; fastreg expects None
        if absorb[0] == '0':
            absorb=None
        fastreg_start = datetime.now()
        fastreg = linear.ols(y=target[0],
                             x=regressors,
                             absorb=absorb,
                             cluster=cluster,
                             data=df)
        fastreg_end = datetime.now()
        print(fastreg)
        print("fastreg time taken: " + str(fastreg_end - fastreg_start))
        print("\n\n\n\n\n")
#########################################################################
#########################################################################
def test_just_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['idcode', 'birth_yr', 'fifty_clusts', 'sixty_clusts'],
cluster=[])
def test_no_absorb_cluster_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['idcode', 'birth_yr', 'fifty_clusts', 'sixty_clusts'])
# comparing fvalue
def test_clustering_single_variable_no_absorb2_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['race'])
# comparing fvalue
assert(np.isclose(self.result.fvalue, 127593.72, atol=TOLERANCE))
# comparing standard errors
assert(np.all(np.isclose(self.result.bse, np.asarray([.148934, .0065111, .0113615]), atol=TOLERANCE)))
# comparing tvalues
assert(np.all(np.isclose(self.result.tvalues, np.asarray([27.75, 2.32, 66.61]), atol=TOLERANCE)))
def test_clustering_single_variable_no_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['fifty_clusts'])
assert(np.isclose(self.result.fvalue, 10230.63, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.048274, .0044294, .0052923]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([85.60, 3.42, 143.00]), atol=TOLERANCE)))
def test_clustering_two_variables_no_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['fifty_clusts', 'sixty_clusts'])
assert(np.isclose(self.result.fvalue, 12347.24, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.0518019, .0048228, .00492]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([79.77, 3.14, 153.82]), atol=TOLERANCE)))
def test_clustering_many_variables_no_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['fifty_clusts', 'sixty_clusts', 'birth_yr', 'idcode'])
assert(np.isclose(self.result.fvalue, 4664.62, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.0551555, .0080815, .007881]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, | np.asarray([74.92, 1.87, 96.03]) | numpy.asarray |
import numpy as np
from numba import jit, int32, float32, double, cfunc
from numba.experimental import jitclass
# numba jitclass attribute type specification for VehicleSimModel.
spec = [
    # state (x, y, heading), its derivative, and the control input
    ('x', double[:]), ('dq', double[:]), ('u', double[:]),
    # mass and yaw inertia
    ('m', double), ('Iz', double),
    # distances from CoG to the front/rear axle
    ('lf', double), ('lr', double),
    # presumably Pacejka-style front/rear tire coefficients — TODO confirm
    ('Bf', double), ('Cf', double), ('Df', double),
    ('Br', double), ('Cr', double), ('Dr', double),
    # rolling/drag resistance and drivetrain coefficients
    ('Cr0', double), ('Cr2', double),
    ('Cm1', double), ('Cm2', double),
    # integration bookkeeping: sub-steps per control step and the two time steps
    ('iterNum', int32), ('sim_dt', double), ('control_dt', double),
    # homogeneous 2D outline of the car used for plotting
    ('car_shape', double[:,:]),
]
@jitclass(spec)
class VehicleSimModel(object):
    def __init__(self, m=0.041, Iz=27.8E-6,
                 lf=0.029, lr=0.033,
                 Bf=2.579, Cf=1.2, Df=0.192,
                 Br=3.3852, Cr=1.2691, Dr=0.1737,
                 Cm1=0.287, Cm2=0.0545,
                 Cr0= 0.0518,
                 Cr2=0.00035, scale=1.0, control_dt = 10.0, sim_dt=1.0):
        # Initialize state, control and model parameters.
        # Defaults appear to be small-scale (RC-car-like) values — TODO confirm units.
        # x = pose state, dq = its derivative, u = control input (all start at rest).
        self.x = np.asfortranarray(np.zeros(3, dtype=np.float64))
        self.dq = np.zeros(3, dtype=np.float64)
        self.u = np.zeros(2, dtype=np.float64)
        self.m = m
        self.Iz= Iz
        self.lf = lf
        self.lr = lr
        self.Bf = Bf
        self.Cf = Cf
        self.Df = Df
        self.Br = Br
        self.Cr = Cr
        self.Dr = Dr
        self.Cr0 = Cr0
        self.Cr2 = Cr2
        self.Cm1 = Cm1
        self.Cm2 = Cm2
        # Rectangular outline of the car (homogeneous coordinates, closed
        # polygon: first vertex repeated last) for drawing; `scale` only
        # affects this visual footprint, not the dynamics.
        car_l = (lf + lr)/2 * scale
        car_w = car_l/2
        self.car_shape = np.asfortranarray(np.array([ [ car_l, car_w, 1.],
                                                      [ car_l,-car_w, 1.],
                                                      [-car_l,-car_w, 1.],
                                                      [-car_l, car_w, 1.],
                                                      [ car_l, car_w, 1.]], dtype=np.float64))
        # Run iterNum inner simulation steps of sim_dt per control step of control_dt.
        self.sim_dt = sim_dt
        self.control_dt = control_dt
        self.iterNum = int(self.control_dt/self.sim_dt)
@property
def shape(self):
shape = np.dot(self.car_shape,
np.asfortranarray(np.array([
[ | np.cos(self.x[2]) | numpy.cos |
import os
from collections import defaultdict
from datetime import datetime
from subprocess import PIPE, call
import astropy.io.fits as pyfits
import astropy.units as u
import astropy.wcs as pywcs
import matplotlib.pyplot as plt
import numpy as np
import pyregion._region_filter as rfilter
import scipy.interpolate as interpolate
from six import string_types
from tqdm import tqdm
from xcs_soxs.constants import erg_per_keV, sigma_to_fwhm
from xcs_soxs.events import write_event_file
from xcs_soxs.instrument_registry import instrument_registry
from xcs_soxs.simput import read_simput_catalog
from xcs_soxs.utils import mylog, ensure_numpy_array, \
parse_prng, parse_value, get_rot_mat, soxs_cfg
def get_response_path(fn):
    """Resolve *fn* to a usable path.

    An existing path is returned as an absolute path; otherwise the file is
    looked up inside the configured SOXS response directory. Raises IOError
    when neither location has the file.
    """
    if os.path.exists(fn):
        return os.path.abspath(fn)
    # fall back to the response directory from the SOXS configuration
    resp_path = soxs_cfg.get("soxs", "response_path")
    if not os.path.exists(resp_path):
        raise IOError("The SOXS response directory %s does not exist!" % resp_path)
    resp_fn = os.path.join(resp_path, fn)
    if os.path.exists(resp_fn):
        return resp_fn
    raise IOError("Could not find file %s! Please download it from " % fn +
                  "http://hea-www.cfa.harvard.edu/~jzuhone/soxs/responses.html "
                  "and place it in the current working directory or place it in "
                  "the SOXS response directory %s." % resp_path)
class SpatialARF(object):
    """
    An effective-area response that varies across the detector: one ARF
    file per spatial region, with events assigned to a region (and hence
    an ARF) by their chip coordinates.
    """
    def __init__(self, filenames, response_regions):
        # The first file stands in as "the" filename for __str__/compatibility.
        self.filename = filenames[0]
        self.arf_files = filenames
        self.response_regions = response_regions
        first_file = pyfits.open(self.filename)
        # Only need to read in one set of energy limits, for a set of ARFs generated to describe an instrument the
        # energy bands should be the same
        self.elo = first_file["SPECRESP"].data.field("ENERG_LO")
        self.ehi = first_file["SPECRESP"].data.field("ENERG_HI")
        self.emid = 0.5 * (self.elo + self.ehi)
        first_file.close()
        # Stack the effective-area curves of all ARFs into one (n_arf, n_energy) array.
        eff_areas = []
        for filename in self.arf_files:
            f = pyfits.open(filename)
            eff_areas.append(np.nan_to_num(f["SPECRESP"].data.field("SPECRESP")).astype("float64"))
            f.close()
        self.eff_areas = np.array(eff_areas)
        # Global maximum area over all regions, used to normalize detection weights.
        maxes = [areas.max() for areas in self.eff_areas]
        self.max_area = max(maxes)
    @classmethod
    def from_instrument(cls, name):
        """
        Return an :class:`~xcs_soxs.instrument.SpatialARF`
        object from the name of an existing instrument
        specification in SOXS.
        Parameters
        ----------
        name : string
            The name of the instrument specification to use
            to obtain the ARF object from.
        Examples
        --------
        >>> arf = xcs_soxs.SpatialARF.from_instrument("xmm_epn_0201903501")
        """
        instr = instrument_registry.get(name, None)
        if instr is None:
            raise KeyError("Instrument '%s' not in registry!" % name)
        return cls(instr["arf"])
    def __str__(self):
        return self.filename
    def find_response_region(self, x_coord, y_coord):
        """
        Use the positions of the events, and the response regions, to determine which ARF to use.
        Parameters
        ----------
        x_coord : np.ndarray
            The x coordinates of events, in the 'chip' coordinate system
        y_coord : np.ndarray
            The y coordinates of events, in the 'chip' coordinate system
        """
        num_evts = x_coord.shape[0]
        # -1 means "no region matched"; such events get zero effective area.
        reg_ids = -np.ones(num_evts, dtype='int')
        for reg_ind, reg in enumerate(self.response_regions):
            if reg[0] == "Box":
                # Box regions are (cx, cy, width, height); test both axes at once.
                inside_reg = np.logical_and.reduce((x_coord >= (reg[1] - (reg[3]/2)), x_coord <= (reg[1] + (reg[3]/2)),
                                                    y_coord >= (reg[2] - (reg[4]/2)), y_coord <= (reg[2] + (reg[4]/2))))
            else:
                # Any other region type is delegated to pyregion's filters.
                region_type, region_args = (reg[0], reg[1:])
                r = getattr(rfilter, region_type)(*region_args)
                inside_reg = r.inside(x_coord, y_coord)
            # NOTE(review): later regions overwrite earlier ones on overlap.
            reg_ids[inside_reg] = reg_ind
        return reg_ids
    def interpolate_area(self, energy, arf_ind):
        """
        Interpolate the effective area to the energies
        provided by the supplied *energy* array.
        """
        unique_arf_inds = np.unique(arf_ind)
        e_area = np.zeros((1, len(energy)))
        for a_ind in unique_arf_inds:
            # events outside every region (index -1) keep zero area
            if a_ind != -1:
                rel_inds = np.where(arf_ind == a_ind)[0]
                rel_energies = energy[rel_inds]
                e_area[0, rel_inds] = np.interp(rel_energies, self.emid, self.eff_areas[a_ind, :], left=0.0, right=0.0)
        return u.Quantity(list(e_area[0, :]), "cm**2")
    def detect_events(self, events, exp_time, flux, refband, prng=None):
        """
        Use the ARF to determine a subset of photons which
        will be detected. Returns a boolean NumPy array
        which is the same is the same size as the number
        of photons, wherever it is "true" means those photons
        have been detected.
        Parameters
        ----------
        events : dict of np.ndarrays
            The energies and positions of the photons.
        exp_time : float
            The exposure time in seconds.
        flux : float
            The total flux of the photons in erg/s/cm^2.
        refband : array_like
            A two-element array or list containing the limits
            of the energy band which the flux was computed in.
        prng : :class:`~numpy.random.RandomState` object, integer, or None
            A pseudo-random number generator. Typically will only
            be specified if you have a reason to generate the same
            set of random numbers, such as for a test. Default is None,
            which sets the seed based on the system time.
        """
        prng = parse_prng(prng)
        energy = events["energy"]
        if energy.size == 0:
            return events
        # Pick the ARF for each event from its chip position, then get its area.
        which_arfs = self.find_response_region(events["cx"], events["cy"])
        earea = self.interpolate_area(energy, which_arfs).value
        idxs = np.logical_and(energy >= refband[0], energy <= refband[1])
        # Expected count rate in the reference band given the requested flux.
        rate = flux/(energy[idxs].sum()*erg_per_keV)*earea[idxs].sum()
        n_ph = prng.poisson(lam=rate*exp_time)
        fak = float(n_ph)/energy.size
        if fak > 1.0:
            mylog.error("Number of events in sample: %d, Number of events wanted: %d" % (energy.size, n_ph))
            raise ValueError("This combination of exposure time and effective area "
                             "will result in more photons being drawn than are available "
                             "in the sample!!!")
        # Rejection sampling: keep each photon with probability area/max_area,
        # then draw exactly n_ph of the survivors. Mutates `events` in place.
        w = earea / self.max_area
        randvec = prng.uniform(size=energy.size)
        eidxs = prng.permutation(np.where(randvec < w)[0])[:n_ph].astype("int64")
        mylog.info("%s events detected." % n_ph)
        for key in events:
            events[key] = events[key][eidxs]
        return events
class AuxiliaryResponseFile(object):
    r"""
    A class for auxiliary response files (ARFs).
    Parameters
    ----------
    filename : string
        The filename of the ARF to be read.
    Examples
    --------
    >>> arf = AuxiliaryResponseFile("xrs_mucal_3x10_3.0eV.arf")
    """
    def __init__(self, filename):
        self.filename = get_response_path(filename)
        f = pyfits.open(self.filename)
        # Energy bin edges / centers and the effective-area curve.
        self.elo = f["SPECRESP"].data.field("ENERG_LO")
        self.ehi = f["SPECRESP"].data.field("ENERG_HI")
        self.emid = 0.5*(self.elo+self.ehi)
        # nan_to_num guards against NaNs sometimes present in SPECRESP columns.
        self.eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP")).astype("float64")
        self.max_area = self.eff_area.max()
        f.close()
    @classmethod
    def from_instrument(cls, name):
        """
        Return an :class:`~xcs_soxs.instrument.AuxiliaryResponseFile`
        object from the name of an existing instrument
        specification in SOXS.
        Parameters
        ----------
        name : string
            The name of the instrument specification to use
            to obtain the ARF object from.
        Examples
        --------
        >>> arf = xcs_soxs.AuxiliaryResponseFile.from_instrument("xmm_epn_0201903501")
        """
        instr = instrument_registry.get(name, None)
        if instr is None:
            raise KeyError("Instrument '%s' not in registry!" % name)
        return cls(instr["arf"])
    def __str__(self):
        return self.filename
    def interpolate_area(self, energy):
        """
        Interpolate the effective area to the energies
        provided by the supplied *energy* array.
        """
        # outside the tabulated range the area is taken as zero
        earea = np.interp(energy, self.emid, self.eff_area, left=0.0, right=0.0)
        return u.Quantity(earea, "cm**2")
    def detect_events(self, events, exp_time, flux, refband, prng=None):
        """
        Use the ARF to determine a subset of photons which
        will be detected. Returns a boolean NumPy array
        which is the same is the same size as the number
        of photons, wherever it is "true" means those photons
        have been detected.
        Parameters
        ----------
        events : dict of np.ndarrays
            The energies and positions of the photons.
        exp_time : float
            The exposure time in seconds.
        flux : float
            The total flux of the photons in erg/s/cm^2.
        refband : array_like
            A two-element array or list containing the limits
            of the energy band which the flux was computed in.
        prng : :class:`~numpy.random.RandomState` object, integer, or None
            A pseudo-random number generator. Typically will only
            be specified if you have a reason to generate the same
            set of random numbers, such as for a test. Default is None,
            which sets the seed based on the system time.
        """
        prng = parse_prng(prng)
        energy = events["energy"]
        if energy.size == 0:
            return events
        earea = self.interpolate_area(energy).value
        idxs = np.logical_and(energy >= refband[0], energy <= refband[1])
        # Expected count rate in the reference band given the requested flux.
        rate = flux/(energy[idxs].sum()*erg_per_keV)*earea[idxs].sum()
        n_ph = prng.poisson(lam=rate*exp_time)
        fak = float(n_ph)/energy.size
        if fak > 1.0:
            mylog.error("Number of events in sample: %d, Number of events wanted: %d" % (energy.size, n_ph))
            raise ValueError("This combination of exposure time and effective area "
                             "will result in more photons being drawn than are available "
                             "in the sample!!!")
        # Rejection sampling: keep each photon with probability area/max_area,
        # then draw exactly n_ph of the survivors. Mutates `events` in place.
        w = earea / self.max_area
        randvec = prng.uniform(size=energy.size)
        eidxs = prng.permutation(np.where(randvec < w)[0])[:n_ph].astype("int64")
        mylog.info("%s events detected." % n_ph)
        for key in events:
            events[key] = events[key][eidxs]
        return events
    def plot(self, xscale="log", yscale="log", xlabel=None,
             ylabel=None, fig=None, ax=None, **kwargs):
        """
        Make a quick plot of the effective area curve.
        Parameters
        ----------
        xscale : string
            The scale of the x-axis. "linear" or "log".
        yscale : string
            The scale of the y-axis. "linear" or "log".
        xlabel : string
            The label of the x-axis. Default: "E (keV)"
        ylabel : string
            The label of the y-axis. Default: "$\mathrm{A\ (cm^2)}$"
        fig : :class:`~matplotlib.figure.Figure`, optional
            The figure to place the plot in. If not supplied,
            one will be created.
        ax : :class:`~matplotlib.axes.Axes`, optional
            The axes to place the plot in. If not supplied,
            one will be created.
        All other arguments are passed to the call to
        :meth:`~matplotlib.axes.Axes.plot`.
        Returns
        -------
        A tuple of the :class:`~matplotlib.figure.Figure` and
        :class:`~matplotlib.axes.Axes` objects.
        """
        if xlabel is None:
            xlabel = "E (keV)"
        if ylabel is None:
            ylabel = "$\mathrm{A\ (cm^2)}$"
        if fig is None:
            fig = plt.figure(figsize=(10, 10))
        if ax is None:
            ax = fig.add_subplot(111)
        ax.plot(self.emid, self.eff_area, **kwargs)
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        return fig, ax
class FlatResponse(AuxiliaryResponseFile):
    """
    A flat effective area response.
    Parameters
    ----------
    emin : float
        The minimum energy of the response in keV.
    emax : float
        The maximum energy of the response in keV.
    area : float
        The effective area in cm**2.
    nbins : integer
        The number of bins in the response file.
    Examples
    --------
    >>> arf = FlatResponse(0.1, 10.0, 3000.0, 10000)
    """
    def __init__(self, emin, emax, area, nbins):
        # No file on disk backs this response, so use a sentinel name.
        self.filename = "flat_response"
        bin_width = (emax - emin) / nbins
        self.elo = emin + bin_width * np.arange(nbins)
        self.ehi = self.elo + bin_width
        self.emid = 0.5 * (self.elo + self.ehi)
        # identical effective area in every energy bin
        self.eff_area = np.ones(nbins) * area
        self.max_area = area
class RedistributionMatrixFile(object):
r"""
A class for redistribution matrix files (RMFs).
Parameters
----------
filename : string
The filename of the RMF to be read.
Examples
--------
>>> rmf = RedistributionMatrixFile("xrs_hdxi.rmf")
"""
    def __init__(self, filename):
        # Open the RMF lazily (memmap) since matrices can be large.
        self.filename = get_response_path(filename)
        self.handle = pyfits.open(self.filename, memmap=True)
        # The response matrix extension may carry either of two standard names.
        if "MATRIX" in self.handle:
            self.mat_key = "MATRIX"
        elif "SPECRESP MATRIX" in self.handle:
            self.mat_key = "SPECRESP MATRIX"
        else:
            raise RuntimeError("Cannot find the response matrix in the RMF "
                               "file %s! " % filename+"It should be named "
                               "\"MATRIX\" or \"SPECRESP MATRIX\".")
        self.header = self.handle[self.mat_key].header
        self.num_mat_columns = len(self.handle[self.mat_key].columns)
        self.ebounds_header = self.handle["EBOUNDS"].header
        # Total redistribution probability per input-energy row.
        self.weights = np.array([w.sum() for w in self.data["MATRIX"]])
        # Input energy grid: edges, combined bin array, centers and widths.
        self.elo = self.data["ENERG_LO"]
        self.ehi = self.data["ENERG_HI"]
        self.ebins = np.append(self.data["ENERG_LO"], self.data["ENERG_HI"][-1])
        self.emid = 0.5*(self.elo+self.ehi)
        self.de = self.ehi-self.elo
        self.n_e = self.elo.size
        self.n_ch = self.header["DETCHANS"]
        # Locate the F_CHAN column to read its TLMIN/TLMAX keywords, which
        # give the first/last legal channel numbers (defaults: 1..DETCHANS).
        num = 0
        for i in range(1, self.num_mat_columns+1):
            if self.header["TTYPE%d" % i] == "F_CHAN":
                num = i
                break
        self.cmin = self.header.get("TLMIN%d" % num, 1)
        self.cmax = self.header.get("TLMAX%d" % num, self.n_ch)
@classmethod
def from_instrument(cls, name):
"""
Return an :class:`~xcs_soxs.instrument.RedistributionMatrixFile`
object from the name of an existing instrument
specification in SOXS.
Parameters
----------
name : string
The name of the instrument specification to use
to obtain the RMF object from.
Examples
--------
>>> arf = xcs_soxs.RedistributionMatrixFile.from_instrument("hdxi")
"""
instr = instrument_registry.get(name, None)
if instr is None:
raise KeyError("Instrument '%s' not in registry!" % name)
return cls(instr["rmf"])
    @property
    def data(self):
        # Row data of the response-matrix extension (MATRIX/SPECRESP MATRIX).
        return self.handle[self.mat_key].data
    @property
    def ebounds_data(self):
        # Row data of the EBOUNDS extension (channel -> energy bounds).
        return self.handle["EBOUNDS"].data
    def __str__(self):
        # Represent the RMF by the path it was loaded from.
        return self.filename
def _make_channels(self, k):
# build channel number list associated to array value,
# there are groups of channels in rmfs with nonzero probabilities
trueChannel = []
f_chan = ensure_numpy_array(np.nan_to_num(self.data["F_CHAN"][k]))
n_chan = ensure_numpy_array(np.nan_to_num(self.data["N_CHAN"][k]))
for start, nchan in zip(f_chan, n_chan):
if nchan == 0:
trueChannel.append(start)
else:
trueChannel += list(range(start, start + nchan))
return np.array(trueChannel)
def e_to_ch(self, energy):
energy = parse_value(energy, "keV")
return np.searchsorted(self.ebounds_data["E_MIN"], energy)-1
def scatter_energies(self, events, prng=None):
"""
Scatter photon energies with the RMF and produce the
corresponding channel values.
Parameters
----------
events : dict of np.ndarrays
The energies and positions of the photons.
prng : :class:`~numpy.random.RandomState` object, integer, or None
A pseudo-random number generator. Typically will only
be specified if you have a reason to generate the same
set of random numbers, such as for a test. Default is None,
which sets the seed based on the system time.
"""
prng = parse_prng(prng)
eidxs = np.argsort(events["energy"])
sorted_e = events["energy"][eidxs]
detectedChannels = []
# run through all photon energies and find which bin they go in
fcurr = 0
last = sorted_e.shape[0]
emin = sorted_e[0]
emax = sorted_e[-1]
pbar = tqdm(leave=True, total=last, desc="Scattering energies ")
for (k, low), high in zip(enumerate(self.elo), self.ehi):
if high < emin or low > emax:
continue
e = sorted_e[fcurr:last]
nn = | np.logical_and(low <= e, e < high) | numpy.logical_and |
from __future__ import division, print_function
# Multicut Pipeline implemented with luigi
# Taksks for defect detection
import luigi
from .customTargets import HDF5DataTarget, VolumeTarget
from .dataTasks import ExternalSegmentation
from .pipelineParameter import PipelineParameter
from .tools import config_logger, run_decorator
import logging
import os
import numpy as np
import vigra
from concurrent import futures
# import the proper nifty version
try:
import nifty
except ImportError:
try:
import nifty_with_cplex as nifty
except ImportError:
import nifty_with_gurobi as nifty
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
class OversegmentationPatchStatistics(luigi.Task):
pathToSeg = luigi.Parameter()
patchSize = luigi.Parameter()
    def requires(self):
        # Upstream dependency: the segmentation volume must already exist.
        return ExternalSegmentation(self.pathToSeg)
@run_decorator
def run(self):
seg = self.input()
seg.open()
ny = seg.shape()[1]
nx = seg.shape()[2]
patch_shape = [self.patchSize, self.patchSize]
def extract_patch_statistics_slice(z):
# 2d blocking representing the patches
seg_z = seg.read([z, 0, 0], [z + 1, ny, nx])
patches = nifty.tools.blocking(roiBegin=[0, 0], roiEnd=[ny, nx], blockShape=patch_shape)
# get number of segments for patches in this slice
n_segs_z = []
for patch_id in range(patches.numberOfBlocks):
patch = patches.getBlock(patch_id)
patch_begin, patch_end = patch.begin, patch.end
patch_slicing = np.s_[patch_begin[0]:patch_end[0], patch_begin[1]:patch_end[1]]
n_segs_z.append(np.unique(seg_z[patch_slicing]).shape[0])
return n_segs_z
# parallel
with futures.ThreadPoolExecutor(max_workers=PipelineParameter().nThreads) as executor:
tasks = []
for z in range(seg.shape()[0]):
tasks.append(executor.submit(extract_patch_statistics_slice, z))
segs_per_patch = []
for fut in tasks:
segs_per_patch.extend(fut.result())
mean = np.mean(segs_per_patch)
std = np.std(segs_per_patch)
# calculate histogram to have a closer look at the stats
n_bins = 16
histo, bin_edges = | np.histogram(segs_per_patch, bins=n_bins) | numpy.histogram |
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from pyrr import quaternion
class test_quaternion(unittest.TestCase):
# many of these values are taken from searches on wolfram alpha
    def test_import(self):
        # The quaternion module must be importable both as a package
        # attribute and directly via ``from pyrr import quaternion``.
        import pyrr
        pyrr.quaternion
        from pyrr import quaternion
def test_create(self):
result = quaternion.create()
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_parameters(self):
result = quaternion.create(1.0, 2.0, 3.0, 4.0)
np.testing.assert_almost_equal(result, [1.0, 2.0, 3.0, 4.0], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_from_x_rotation(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(np.pi)
self.assertTrue(np.allclose(q, [1., 0., 0., 0.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [np.sqrt(0.5), 0., 0., np.sqrt(0.5)]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(q, [-np.sqrt(0.5), 0., 0., np.sqrt(0.5)]))
def test_create_from_y_rotation(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(np.pi)
self.assertTrue(np.allclose(q, [0., 1., 0., 0.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [0., np.sqrt(0.5), 0., np.sqrt(0.5)]))
# -90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(-np.pi / 2.)
def test_create_from_z_rotation(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(np.pi)
self.assertTrue(np.allclose(q, [0., 0., 1., 0.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [0., 0., np.sqrt(0.5), np.sqrt(0.5)]))
# -90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(-np.pi / 2.)
def test_create_from_axis_rotation(self):
    """180 degree rotation about the normalized unit-diagonal axis."""
    # wolfram alpha can be awesome sometimes
    result = quaternion.create_from_axis_rotation([0.57735, 0.57735, 0.57735], np.pi)
    np.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
    # BUG FIX: np.float was removed in NumPy 1.24; compare against float64.
    self.assertTrue(result.dtype == np.float64)

def test_create_from_axis_rotation_non_normalized(self):
    """A non-normalized axis must be normalized internally."""
    result = quaternion.create_from_axis_rotation([1., 1., 1.], np.pi)
    np.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
    self.assertTrue(result.dtype == np.float64)

def test_create_from_matrix_unit(self):
    """The identity matrix maps to the identity quaternion."""
    result = quaternion.create_from_matrix(np.eye(3))
    np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
    self.assertTrue(result.dtype == np.float64)

def test_create_from_matrix_x(self):
    """180 degree rotation about X expressed as a rotation matrix."""
    result = quaternion.create_from_matrix([
        [1., 0., 0.],
        [0., -1., 0.],
        [0., 0., -1.],
    ])
    np.testing.assert_almost_equal(result, [1., 0., 0., 0.], decimal=5)
    self.assertTrue(result.dtype == np.float64)

def test_create_from_matrix_y(self):
    """180 degree rotation about Y expressed as a rotation matrix."""
    result = quaternion.create_from_matrix([
        [-1., 0., 0.],
        [0., 1., 0.],
        [0., 0., -1.],
    ])
    np.testing.assert_almost_equal(result, [0., 1., 0., 0.], decimal=5)
    self.assertTrue(result.dtype == np.float64)

def test_create_from_matrix_z(self):
    """180 degree rotation about Z expressed as a rotation matrix."""
    result = quaternion.create_from_matrix([
        [-1., 0., 0.],
        [0., -1., 0.],
        [0., 0., 1.],
    ])
    np.testing.assert_almost_equal(result, [0., 0., 1., 0.], decimal=5)
    self.assertTrue(result.dtype == np.float64)
@unittest.skip('Not implemented')
def test_create_from_eulers(self):
    pass

@unittest.skip('Not implemented')
def test_create_from_inverse_of_eulers(self):
    pass

def test_cross(self):
    """Composing a +90 and a -90 degree X rotation must give the identity."""
    rot_pos = quaternion.create_from_x_rotation(np.pi / 2.0)
    rot_neg = quaternion.create_from_x_rotation(-np.pi / 2.0)
    combined = quaternion.cross(rot_pos, rot_neg)
    np.testing.assert_almost_equal(combined, quaternion.create(), decimal=5)
def test_quaternion_slerp(self):
    """Spherical linear interpolation between the identity and Y rotations."""
    sqrt2 = np.sqrt(2) / 2
    identity = np.array([0.0, 0.0, 0.0, 1.0])
    y90rot = np.array([0.0, sqrt2, 0.0, sqrt2])
    # (removed unused local y180rot)
    # Testing a == 0: must return the start quaternion unchanged.
    result = quaternion.slerp(identity, y90rot, 0.0)
    np.testing.assert_almost_equal(result, identity, decimal=4)
    # Testing a == 1: must be the 90 degree rotation on Y: 0 0.7 0 0.7.
    result = quaternion.slerp(identity, y90rot, 1.0)
    np.testing.assert_almost_equal(result, y90rot, decimal=4)
    # Standard, easy case: halfway must be a 45 degree rotation on Y.
    y45rot1 = quaternion.slerp(identity, y90rot, 0.5)
    # BUG FIX: the halfway result was never checked against an explicit
    # expected value; a 45 degree Y rotation is [0, sin(pi/8), 0, cos(pi/8)].
    np.testing.assert_almost_equal(
        y45rot1, [0.0, np.sin(np.pi / 8), 0.0, np.cos(np.pi / 8)], decimal=4)
    # Reverse case: swapping the endpoints at a == 0.5 gives the same result.
    y45rot2 = quaternion.slerp(y90rot, identity, 0.5)
    np.testing.assert_almost_equal(y45rot1, y45rot2, decimal=4)
    # Shortest-path check: must be a 45 degree rotation, certainly not 135.
    # y45rot3 = quaternion.slerp(identity, quaternion.negate(y90rot), 0.5)
    y45rot3 = quaternion.slerp(identity, y90rot, 0.5)
    y45angle3 = quaternion.rotation_angle(y45rot3)
    np.testing.assert_almost_equal(y45angle3 * 180 / np.pi, 45, decimal=4)
    np.testing.assert_almost_equal(y45angle3, np.pi / 4, decimal=4)
    # Same, but inverted: -0 -0.38 -0 -0.92 is acceptable too.
    y45rot4 = quaternion.slerp(-y90rot, identity, 0.5)
    np.testing.assert_almost_equal(np.abs(y45rot4), y45rot2, decimal=4)
    # Testing q1 == q2: must return the quaternion unchanged.
    # (BUG FIX: removed stray trailing semicolon.)
    y90rot3 = quaternion.slerp(y90rot, y90rot, 0.5)
    np.testing.assert_almost_equal(y90rot3, y90rot, decimal=4)
    # 180 degree rotation: halfway is a 90 degree rotation about some axis
    # in the XZ plane.  (Renamed the angle variable so the quaternion is
    # not overwritten.)
    xz90rot = quaternion.slerp(identity, -y90rot, 0.5)
    xz90angle = quaternion.rotation_angle(xz90rot)
    np.testing.assert_almost_equal(xz90angle, np.pi / 4, decimal=4)
def test_is_zero_length(self):
    """A quaternion with a non-zero component is not zero length."""
    self.assertFalse(quaternion.is_zero_length([1., 0., 0., 0.]))

def test_is_zero_length_zero(self):
    """The all-zero quaternion is zero length."""
    self.assertTrue(quaternion.is_zero_length([0., 0., 0., 0.]))

def test_is_non_zero_length(self):
    """A quaternion with a non-zero component has non-zero length."""
    self.assertTrue(quaternion.is_non_zero_length([1., 0., 0., 0.]))

def test_is_non_zero_length_zero(self):
    """The all-zero quaternion does not have non-zero length."""
    self.assertFalse(quaternion.is_non_zero_length([0., 0., 0., 0.]))

def test_squared_length_identity(self):
    """The identity quaternion has squared length 1."""
    np.testing.assert_almost_equal(
        quaternion.squared_length([0., 0., 0., 1.]), 1., decimal=5)

def test_squared_length(self):
    """[1, 1, 1, 1] has squared length 1 + 1 + 1 + 1 = 4."""
    np.testing.assert_almost_equal(
        quaternion.squared_length([1., 1., 1., 1.]), 4., decimal=5)

def test_squared_length_batch(self):
    """squared_length must operate row-wise on a batch."""
    batch = [
        [0., 0., 0., 1.],
        [1., 1., 1., 1.],
    ]
    np.testing.assert_almost_equal(
        quaternion.squared_length(batch), [1., 4.], decimal=5)

def test_length_identity(self):
    """The identity quaternion has length 1."""
    np.testing.assert_almost_equal(
        quaternion.length([0., 0., 0., 1.]), 1., decimal=5)

def test_length(self):
    """[1, 1, 1, 1] has length sqrt(4) = 2."""
    np.testing.assert_almost_equal(
        quaternion.length([1., 1., 1., 1.]), 2., decimal=5)

def test_length_batch(self):
    """length must operate row-wise on a batch."""
    batch = [
        [0., 0., 0., 1.],
        [1., 1., 1., 1.],
    ]
    np.testing.assert_almost_equal(
        quaternion.length(batch), [1., 2.], decimal=5)
def test_normalize_identity(self):
    """Normalizing the identity quaternion is a no-op."""
    np.testing.assert_almost_equal(
        quaternion.normalize([0., 0., 0., 1.]), [0., 0., 0., 1.], decimal=5)

def test_normalize_non_identity(self):
    """Normalizing [1, 2, 3, 4] divides every component by sqrt(30)."""
    expected = [1. / np.sqrt(30.), np.sqrt(2. / 15.),
                np.sqrt(3. / 10.), 2. * np.sqrt(2. / 15.)]
    np.testing.assert_almost_equal(
        quaternion.normalize([1., 2., 3., 4.]), expected, decimal=5)

def test_normalize_batch(self):
    """normalize must operate row-wise on a batch of quaternions."""
    batch = [
        [0., 0., 0., 1.],
        [1., 2., 3., 4.],
    ]
    expected = [
        [0., 0., 0., 1.],
        [1. / np.sqrt(30.), np.sqrt(2. / 15.),
         np.sqrt(3. / 10.), 2. * np.sqrt(2. / 15.)],
    ]
    np.testing.assert_almost_equal(quaternion.normalize(batch), expected, decimal=5)

def test_rotation_angle(self):
    """A 180 degree axis-rotation quaternion reports an angle of pi."""
    angle = quaternion.rotation_angle(
        [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
    np.testing.assert_almost_equal(angle, np.pi, decimal=5)

def test_rotation_axis(self):
    """The rotation axis of the diagonal rotation is the unit diagonal."""
    axis = quaternion.rotation_axis(
        [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
    np.testing.assert_almost_equal(axis, [0.57735, 0.57735, 0.57735], decimal=5)
def test_dot_adjacent(self):
    """Perpendicular unit quaternions dot to zero."""
    dotted = quaternion.dot([1., 0., 0., 0.], [0., 1., 0., 0.])
    np.testing.assert_almost_equal(dotted, 0.0, decimal=5)

def test_dot_parallel(self):
    """A unit quaternion dotted with itself gives one."""
    dotted = quaternion.dot([0., 1., 0., 0.], [0., 1., 0., 0.])
    np.testing.assert_almost_equal(dotted, 1.0, decimal=5)

def test_dot_angle(self):
    """Dot of two arbitrary quaternions: .2*2 + .2*(-.2) = 0.36."""
    dotted = quaternion.dot([.2, .2, 0., 0.], [2., -.2, 0., 0.])
    np.testing.assert_almost_equal(dotted, 0.36, decimal=5)

def test_dot_batch(self):
    """dot must operate row-wise on batches of quaternions."""
    lhs = [
        [1., 0., 0., 0.],
        [0., 1., 0., 0.],
        [.2, .2, 0., 0.],
    ]
    rhs = [
        [0., 1., 0., 0.],
        [0., 1., 0., 0.],
        [2., -.2, 0., 0.],
    ]
    np.testing.assert_almost_equal(
        quaternion.dot(lhs, rhs), [0., 1., 0.36], decimal=5)

def test_conjugate(self):
    """Conjugating the identity quaternion is a no-op."""
    result = quaternion.conjugate([0., 0., 0., 1.])
    np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)

def test_conjugate_rotation(self):
    """Conjugation negates the vector part and keeps the scalar part."""
    result = quaternion.conjugate(
        [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
    np.testing.assert_almost_equal(
        result, [-0.57735, -0.57735, -0.57735, 6.12323e-17], decimal=5)
@unittest.skip('Not implemented')
def test_power(self):
    pass

def test_inverse(self):
    # The identity quaternion is its own inverse.
    result = quaternion.inverse([0., 0., 0., 1.])
    np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)

def test_inverse_rotation(self):
    # Inverse of a 180 degree rotation about the unit diagonal axis.
    result = quaternion.inverse([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
    np.testing.assert_almost_equal(result, [-0.577351, -0.577351, -0.577351, 6.12324e-17], decimal=5)

def test_inverse_non_unit(self):
    # NOTE(review): the mathematical inverse is conjugate(q) / |q|^2
    # (squared length); this expectation divides by length, mirroring
    # whatever pyrr's implementation does -- confirm against the library
    # before "fixing" either side.
    q = [1, 2, 3, 4]
    result = quaternion.inverse(q)
    expected = quaternion.conjugate(q) / quaternion.length(q)
    np.testing.assert_almost_equal(result, expected, decimal=5)

def test_negate_unit(self):
    # Negation flips every component, including w.
    result = quaternion.negate([0., 0., 0., 1.])
    np.testing.assert_almost_equal(result, [0., 0., 0., -1.], decimal=5)

def test_negate(self):
    result = quaternion.negate([1., 2., 3., 4.])
    np.testing.assert_almost_equal(result, [-1., -2., -3., -4.], decimal=5)
def _assert_rotates(self, q, vector, expected):
    """Helper: rotating *vector* by quaternion *q* must yield *expected*."""
    self.assertTrue(np.allclose(quaternion.apply_to_vector(q, vector), expected))

def test_apply_to_vector_unit_x(self):
    """The identity quaternion leaves the X unit vector untouched."""
    result = quaternion.apply_to_vector([0., 0., 0., 1.], [1., 0., 0.])
    np.testing.assert_almost_equal(result, [1., 0., 0.], decimal=5)

def test_apply_to_vector_x(self):
    """Rotations about X applied to each basis vector."""
    # 180 degree turn around X axis
    q = quaternion.create_from_x_rotation(np.pi)
    for vec, expected in [([1., 0., 0.], [1., 0., 0.]),
                          ([0., 1., 0.], [0., -1., 0.]),
                          ([0., 0., 1.], [0., 0., -1.])]:
        self._assert_rotates(q, vec, expected)
    # 90 degree rotation around X axis
    q = quaternion.create_from_x_rotation(np.pi / 2.)
    for vec, expected in [([1., 0., 0.], [1., 0., 0.]),
                          ([0., 1., 0.], [0., 0., 1.]),
                          ([0., 0., 1.], [0., -1., 0.])]:
        self._assert_rotates(q, vec, expected)
    # -90 degree rotation around X axis
    q = quaternion.create_from_x_rotation(-np.pi / 2.)
    for vec, expected in [([1., 0., 0.], [1., 0., 0.]),
                          ([0., 1., 0.], [0., 0., -1.]),
                          ([0., 0., 1.], [0., 1., 0.])]:
        self._assert_rotates(q, vec, expected)

def test_apply_to_vector_y(self):
    """Rotations about Y applied to each basis vector."""
    # 180 degree turn around Y axis
    q = quaternion.create_from_y_rotation(np.pi)
    for vec, expected in [([1., 0., 0.], [-1., 0., 0.]),
                          ([0., 1., 0.], [0., 1., 0.]),
                          ([0., 0., 1.], [0., 0., -1.])]:
        self._assert_rotates(q, vec, expected)
    # 90 degree rotation around Y axis
    q = quaternion.create_from_y_rotation(np.pi / 2.)
    for vec, expected in [([1., 0., 0.], [0., 0., -1.]),
                          ([0., 1., 0.], [0., 1., 0.]),
                          ([0., 0., 1.], [1., 0., 0.])]:
        self._assert_rotates(q, vec, expected)
    # -90 degree rotation around Y axis
    q = quaternion.create_from_y_rotation(-np.pi / 2.)
    for vec, expected in [([1., 0., 0.], [0., 0., 1.]),
                          ([0., 1., 0.], [0., 1., 0.]),
                          ([0., 0., 1.], [-1., 0., 0.])]:
        self._assert_rotates(q, vec, expected)

def test_apply_to_vector_z(self):
    """Rotations about Z applied to each basis vector."""
    # 180 degree turn around Z axis
    q = quaternion.create_from_z_rotation(np.pi)
    for vec, expected in [([1., 0., 0.], [-1., 0., 0.]),
                          ([0., 1., 0.], [0., -1., 0.]),
                          ([0., 0., 1.], [0., 0., 1.])]:
        self._assert_rotates(q, vec, expected)
    # 90 degree rotation around Z axis
    q = quaternion.create_from_z_rotation(np.pi / 2.)
    for vec, expected in [([1., 0., 0.], [0., 1., 0.]),
                          ([0., 1., 0.], [-1., 0., 0.]),
                          ([0., 0., 1.], [0., 0., 1.])]:
        self._assert_rotates(q, vec, expected)
    # -90 degree rotation around Z axis
    q = quaternion.create_from_z_rotation(-np.pi / 2.)
    for vec, expected in [([1., 0., 0.], [0., -1., 0.]),
                          ([0., 1., 0.], [1., 0., 0.]),
                          ([0., 0., 1.], [0., 0., 1.])]:
        self._assert_rotates(q, vec, expected)

def test_apply_to_vector_non_unit(self):
    """Vector length must be preserved for zero-length and scaled vectors."""
    q = quaternion.create_from_x_rotation(np.pi)
    for vec, expected in [([0., 0., 0.], [0., 0., 0.]),
                          ([2., 0., 0.], [2., 0., 0.]),
                          ([0., 2., 0.], [0., -2., 0.]),
                          ([0., 0., 2.], [0., 0., -2.])]:
        self._assert_rotates(q, vec, expected)
def test_identity(self):
# https://en.wikipedia.org/wiki/Quaternion
i = quaternion.create(1., 0., 0., 0.)
j = quaternion.create(0., 1., 0., 0.)
k = quaternion.create(0., 0., 1., 0.)
one = quaternion.create(0., 0., 0., 1.)
# i * 1 = i
# j * 1 = j
# k * 1 = k
# 1 * i = i
# 1 * j = j
# 1 * k = k
i1 = quaternion.cross(i, one)
j1 = quaternion.cross(j, one)
k1 = quaternion.cross(k, one)
_1i = quaternion.cross(one, i)
_1j = quaternion.cross(one, j)
_1k = quaternion.cross(one, k)
self.assertTrue(np.allclose(i1, _1i, i))
self.assertTrue(np.allclose(j1, _1j, j))
self.assertTrue(np.allclose(k1, _1k, k))
# result = -1
ii = quaternion.cross(i, i)
kk = quaternion.cross(k, k)
jj = quaternion.cross(j, j)
ijk = quaternion.cross(quaternion.cross(i, j), k)
self.assertTrue(np.allclose(ii, -one))
self.assertTrue(np.allclose(jj, -one))
self.assertTrue(np.allclose(kk, -one))
self.assertTrue(np.allclose(ijk, -one))
# ij = k
# ji = -k
# jk = i
# kj = -i
# ki = j
# ik = -j
ij = quaternion.cross(i, j)
ji = quaternion.cross(j, i)
jk = quaternion.cross(j, k)
kj = quaternion.cross(k, j)
ki = quaternion.cross(k, i)
ik = quaternion.cross(i, k)
self.assertTrue(np.allclose(ij, k))
self.assertTrue( | np.allclose(ji, -k) | numpy.allclose |
"""
Team name: ThE raNDom WALkERS
Members: <NAME>, <NAME>, <NAME>
"""
import pandas as pd
import numpy as np
from typing import Union
import pandas as pd
import numpy as np
from typing import Union
def compute_beta_vec(bo, epsilon_vec):
    """Compute beta from the distribution of f values stored on *bo*.

    beta = ln(N1 / (N0 * epsilon)) / (f1 - f0), where f0 is the smallest
    value in bo.f_vec (with multiplicity N0) and f1 is the second-smallest
    distinct value (with multiplicity N1).  Returns one beta per epsilon.
    """
    ordered = np.sort(bo.f_vec)
    f_min = ordered[0]
    n_min = np.count_nonzero(ordered == f_min)
    # Index of the first entry strictly above the minimum, i.e. the
    # second distinct value (IndexError if all values are equal, as before).
    next_idx = np.flatnonzero(ordered > f_min)[0]
    f_next = ordered[next_idx]
    n_next = np.count_nonzero(ordered == f_next)
    return np.log(n_next / (n_min * epsilon_vec)) / (f_next - f_min)
def get_optimal_betas_df(lambda_vec, epsilon_vec, lambda_arrays_list, model_string='G1'):
"""Return a dataframe containing the optimal values for beta."""
running_list = []
model_repeated = np.repeat(model_string, len(epsilon_vec))
for i, lambda_array in enumerate(lambda_arrays_list):
lambda_repeated = np.repeat(lambda_vec[i], len(epsilon_vec))
l = list(zip(model_repeated, lambda_repeated, epsilon_vec, | np.median(lambda_array, axis=0) | numpy.median |
from glob import glob
import numpy as np
import scipy.sparse as sparse
import matplotlib.pyplot as plt
import networkx as nx
import operator
from spatialpower.tissue_generation import assign_labels
from spatialpower.tissue_generation import visualization
# Directory that holds the pre-generated blank graph networks and positions.
results_dir = './results/motif_detection/'
# Sorted file lists of the saved adjacency matrices and node positions
# (sorting the glob results keeps network/position files aligned by index).
adj_mat_list = np.sort(glob(results_dir + 'blank_graph_network*.npy'))
pos_mat_list = np.sort(glob(results_dir + 'blank_graph_positions*.npy'))
# Grid dimension used downstream -- presumably the tissue size; TODO confirm.
dim = 300
##RANDOM##
# Uniform prior over 10 cell types (each probability 0.1).
cell_type_probabilities = np.ones(10) * 0.1
neighborhood_probabilities = | np.ones((10,10)) | numpy.ones |
from dsynth.view_datasets.tless import TlessMultiviewDataset
from dsynth import MultiviewWarper
import numpy as np
def test_tless_dataset():
dataset = TlessMultiviewDataset(obj_id=2, unit_test=True)
ibr = MultiviewWarper(dataset)
R = | np.reshape(dataset[1].cam_R, (3,3)) | numpy.reshape |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
from MiniFramework.Layer import *
from MiniFramework.ActivationLayer import *
from MiniFramework.ClassificationLayer import *
class GRUCell(object):
def __init__(self, input_size: int, hidden_size: int):
    """A single GRU cell.

    Parameters
    ----------
    input_size : int
        Dimensionality of the input vector x.
    hidden_size : int
        Dimensionality of the hidden state h.
    """
    self.input_size = input_size
    self.hidden_size = hidden_size
def split_params(self, w, size):
s=[]
for i in range(3):
s.append(w[(i*size):((i+1)*size)])
return s[0], s[1], s[2]
# Get shared parameters, and split them to fit 3 gates, in the order of z, r, \tilde{h} (n stands for \tilde{h} in code)
def get_params(self, W, U):
    """Unpack the stacked weight arrays into per-gate matrices (z, r, n order)."""
    # W holds the recurrent (h -> gate) weights, U the input (x -> gate) weights.
    self.wz, self.wr, self.wn = self.split_params(W, self.hidden_size)
    self.uz, self.ur, self.un = self.split_params(U, self.input_size)
def forward(self, x, h_p, W, U):
    """One GRU time step; results are stored on the instance, not returned.

    Parameters
    ----------
    x : array
        Input at the current time step.
    h_p : array
        Hidden state from the previous time step.
    W, U : array
        Stacked recurrent / input weight arrays, split into z, r, n gates.

    After the call: self.z (update gate), self.r (reset gate),
    self.n (candidate state) and self.h (new hidden state) are set.
    """
    self.get_params(W, U)
    self.x = x
    # Update and reset gates.
    self.z = Sigmoid().forward(np.dot(h_p, self.wz) + np.dot(x, self.uz))
    self.r = Sigmoid().forward(np.dot(h_p, self.wr) + np.dot(x, self.ur))
    # Candidate state uses the reset-gated previous hidden state.
    self.n = Tanh().forward(np.dot((self.r * h_p), self.wn) + np.dot(x, self.un))
    # New state: z gates the candidate, (1 - z) keeps the previous state.
    self.h = (1 - self.z) * h_p + self.z * self.n
def backward(self, h_p, in_grad):
self.dzz = in_grad * (self.n - h_p) * self.z * (1 - self.z)
self.dzn = in_grad * self.z * (1 - self.n * self.n)
self.dzr = np.dot(self.dzn, self.wn.T) * h_p * self.r * (1 - self.r)
self.dwn = np.dot((self.r * h_p).T, self.dzn)
self.dun = np.dot(self.x.T, self.dzn)
self.dwr = np.dot(h_p.T, self.dzr)
self.dur = np.dot(self.x.T, self.dzr)
self.dwz = | np.dot(h_p.T, self.dzz) | numpy.dot |
import os
import io
import json
import math
import tempfile
import contextlib
from typing import Union, Iterable, List, Optional, Tuple
import glob
from functools import reduce
from types import SimpleNamespace
from enum import Enum
from collections import namedtuple
from copy import copy, deepcopy
from itertools import groupby
import warnings
import numpy as np
import imageio
from boltons.setutils import IndexedSet
try:
import matplotlib.cm
except ImportError:
warnings.warn(
"Visualization dependencies not available, colorize will not work",
ImportWarning,
stacklevel=2,
)
from rasterio.crs import CRS
from rasterio.rpc import RPC
import rasterio
import rasterio.warp
import rasterio.shutil
from rasterio.coords import BoundingBox
from rasterio._err import CPLE_AppDefinedError
from rasterio.enums import Resampling, Compression
from rasterio.features import geometry_mask
from rasterio.windows import Window, WindowMethodsMixin
from rasterio.io import MemoryFile
from affine import Affine
from shapely.geometry import Point, Polygon
from PIL import Image
from telluric.constants import WEB_MERCATOR_CRS, MERCATOR_RESOLUTION_MAPPING, RASTER_TYPE, WGS84_CRS
from telluric.vectors import GeoVector
from telluric.util.projections import transform
from telluric.util.raster_utils import (
convert_to_cog, _calc_overviews_factors,
_mask_from_masked_array, _join_masks_from_masked_array,
calc_transform, warp)
from telluric.vrt import (
boundless_vrt_doc,
raster_list_vrt,
raster_collection_vrt,
wms_vrt)
try:
from telluric.util.local_tile_server import TileServer
except ImportError:
warnings.warn(
"Visualization dependencies not available, local tile server will not work",
ImportWarning,
stacklevel=2,
)
# Mapping from NumPy scalar types to the corresponding rasterio dtype
# constants, used when handing arrays to rasterio for writing.
dtype_map = {
    np.uint8: rasterio.uint8,
    np.uint16: rasterio.uint16,
    np.uint32: rasterio.uint32,
    np.int16: rasterio.int16,
    np.int32: rasterio.int32,
    np.float32: rasterio.float32,
    np.float64: rasterio.float64,
}

# File extension -> GDAL driver name used when saving rasters.
gdal_drivers = {
    'tif': 'GTiff',
    'tiff': 'GTiff',
    'png': 'PNG',
    'jpg': 'JPEG',
    'jpeg': 'JPEG',
}

# GDAL metadata tag under which telluric stores its band names.
band_names_tag = 'telluric_band_names'
class MergeStrategy(Enum):
    """How the band set of a merge result is computed (see _prepare_rasters)."""
    LEFT_ALL = 0      # keep exactly the bands of the first raster
    INTERSECTION = 1  # keep only bands common to all intersecting rasters
    UNION = 2         # keep every band that appears in any intersecting raster
class PixelStrategy(Enum):
    """How overlapping pixel values are resolved (see _apply_pixel_strategy)."""
    INDEX = 0  # pixel value becomes the list index of the contributing raster
    FIRST = 1  # first unmasked value wins (the default merge behaviour)
def join(rasters):
    """
    This method takes a list of rasters and returns a raster that is constructed of all of them
    """
    raster = rasters[0]  # using the first raster to understand what is the type of data we have
    mask_band = None
    nodata = None
    with raster._raster_opener(raster.source_file) as r:
        nodata = r.nodata
        mask_flags = r.mask_flag_enums
    # A dataset-wide mask is present only when every band reports per_dataset.
    per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
    # Use band 0 as the VRT mask band only when there is no nodata value to
    # fall back on for masking.
    if per_dataset_mask and nodata is None:
        mask_band = 0
    return GeoRaster2.from_rasters(rasters, relative_to_vrt=False, nodata=nodata, mask_band=mask_band)
def _dest_resolution(first_raster, crs):
    """Return the (x, y) resolution of *first_raster* reprojected to *crs*."""
    # Renamed the local so it no longer shadows the module-level
    # `transform` import.
    dst_transform, _, _ = rasterio.warp.calculate_default_transform(
        first_raster.crs, crs, first_raster.width, first_raster.height,
        *first_raster.footprint().get_bounds(first_raster.crs))
    # a / e are the pixel-width / pixel-height terms of the affine transform.
    return abs(dst_transform.a), abs(dst_transform.e)
def merge_all(rasters, roi=None, dest_resolution=None, merge_strategy=MergeStrategy.UNION,
              shape=None, ul_corner=None, crs=None, pixel_strategy=PixelStrategy.FIRST,
              resampling=Resampling.nearest, crop=True):
    """Merge a list of rasters, cropping (optional) by a region of interest.
    There are cases that the roi is not precise enough for this cases one can use,
    the upper left corner the shape and crs to precisely define the roi.
    When roi is provided the ul_corner, shape and crs are ignored.

    NB: Reading rotated rasters with GDAL (and rasterio) gives unpredictable result
    and in order to overcome this you must use the warping algorithm to apply the rotation (it
    might be acomplished by using gdalwarp utility). Hence we should have the possibility to
    disable cropping, otherwise calling merge_all on rotated rasters may cause fails.
    """
    first_raster = rasters[0]

    if roi:
        crs = crs or roi.crs
        # Fall back to the first raster's resolution in the target CRS.
        dest_resolution = dest_resolution or _dest_resolution(first_raster, crs)

    # Create empty raster
    empty = GeoRaster2.empty_from_roi(
        roi, resolution=dest_resolution, band_names=first_raster.band_names,
        dtype=first_raster.dtype, shape=shape, ul_corner=ul_corner, crs=crs)

    # Create a list of single band rasters
    if not crop:
        warnings.warn(
            "The option to disable crop has been added to overcome rare issues that happen "
            "while working with rotated rasters and it is not yet well tested.",
            stacklevel=2
        )

    # Align every raster onto the empty baseline and resolve the band set
    # according to the merge strategy.
    all_band_names, projected_rasters = _prepare_rasters(rasters, merge_strategy, empty,
                                                         resampling=resampling, crop=crop)
    # _prepare_rasters keeps None placeholders, so lengths must match.
    assert len(projected_rasters) == len(rasters)

    prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)

    # Extend the rasters list with only those that have the requested bands
    prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)

    if all_band_names:
        # Merge common bands
        prepared_rasters = _merge_common_bands(prepared_rasters)

        # Merge all bands
        raster = reduce(_stack_bands, prepared_rasters)

        return empty.copy_with(image=raster.image, band_names=raster.band_names)
    else:
        raise ValueError("result contains no bands, use another merge strategy")
def _apply_pixel_strategy(rasters, pixel_strategy):
    # type: (List[Optional[_Raster]], PixelStrategy) -> List[_Raster]
    """Realize *pixel_strategy* over a list of (possibly None) rasters."""
    if pixel_strategy == PixelStrategy.FIRST:
        # Default strategy: the merge chain downstream already keeps the
        # first unmasked pixel, so only the None placeholders are dropped.
        return [raster for raster in rasters if raster]
    if pixel_strategy == PixelStrategy.INDEX:
        indexed_rasters = []
        for position, raster in enumerate(rasters):
            if not raster:
                continue
            # Replace every pixel value with this raster's list index while
            # keeping the original mask intact.
            index_image = np.ma.masked_array(
                np.full_like(raster.image.data, position, dtype=int),
                raster.image.mask
            )  # type: np.ndarray
            indexed_rasters.append(_Raster(image=index_image, band_names=raster.band_names))
        return indexed_rasters
    raise ValueError("Please use an allowed pixel_strategy")
def _explode_rasters(projected_rasters, all_band_names):
    # type: (List[_Raster], IndexedSet[str]) -> List[_Raster]
    """Flatten every raster into its single-band pieces, restricted to *all_band_names*."""
    return [
        single_band
        for projected in projected_rasters
        for single_band in _explode_raster(projected, all_band_names)
    ]
def _merge_common_bands(rasters):
    # type: (List[_Raster]) -> List[_Raster]
    """Combine the common bands.

    Each input raster is single-band; rasters that share a band name are
    reduced pairwise with _fill_pixels.  The output preserves the order in
    which band names first appear in *rasters*.
    """
    # Compute band order
    all_bands = IndexedSet([rs.band_names[0] for rs in rasters])

    def key(rs):
        return all_bands.index(rs.band_names[0])

    rasters_final = []  # type: List[_Raster]
    # groupby only groups adjacent items, so the input must be sorted by
    # the same key first.
    for band_name, rasters_group in groupby(sorted(rasters, key=key), key=key):
        rasters_final.append(reduce(_fill_pixels, rasters_group))

    return rasters_final
def _prepare_rasters(
        rasters,  # type: List[GeoRaster2]
        merge_strategy,  # type: MergeStrategy
        first,  # type: GeoRaster2
        resampling=Resampling.nearest,  # type: Resampling
        crop=True,  # type: bool
):
    # type: (...) -> Tuple[IndexedSet[str], List[Optional[_Raster]]]
    """Prepares the rasters according to the baseline (first) raster and the merge strategy.

    The baseline (first) raster is used to crop and reproject the other rasters,
    while the merge strategy is used to compute the bands of the result. These
    are returned for diagnostics.
    """
    # Create list of prepared rasters
    all_band_names = IndexedSet(first.band_names)
    projected_rasters = []
    for raster in rasters:
        try:
            projected_raster = _prepare_other_raster(first, raster, resampling=resampling, crop=crop)
        except ValueError:
            # A raster that cannot be aligned onto the baseline is kept as
            # None so the output list stays aligned with the input order.
            projected_raster = None

        # Modify the bands only if an intersecting raster was returned
        if projected_raster:
            if merge_strategy is MergeStrategy.INTERSECTION:
                all_band_names.intersection_update(projected_raster.band_names)
            elif merge_strategy is MergeStrategy.UNION:
                all_band_names.update(projected_raster.band_names)
        # (LEFT_ALL needs no update: the band set stays exactly the first
        # raster's bands.)

        # Some rasters might be None. In this way, we still retain the original order
        projected_rasters.append(projected_raster)

    return all_band_names, projected_rasters
def _explode_raster(raster, band_names=None):
    # type: (_Raster, Optional[Iterable[str]]) -> List[_Raster]
    """Splits a raster into multiple single-band rasters.

    Parameters
    ----------
    raster : _Raster
        The raster to split.
    band_names : iterable of str, optional
        If given, only the bands present in both *raster* and *band_names*
        are extracted, in the raster's own band order; otherwise all bands.
    """
    # BUG FIX: replaced the mutable default argument `band_names=[]` with a
    # None sentinel.  `not band_names` treats None and an empty iterable the
    # same way the old code treated [], so callers are unaffected.
    if not band_names:
        band_names = raster.band_names
    else:
        band_names = list(IndexedSet(raster.band_names).intersection(band_names))
    return [_Raster(image=raster.bands_data([band_name]), band_names=[band_name])
            for band_name in band_names]
def _prepare_other_raster(one, other, resampling=Resampling.nearest, crop=True):
    # type: (GeoRaster2, GeoRaster2, Resampling, bool) -> Union[_Raster, None]
    """Crop and reproject *other* onto the grid of *one*, if necessary.

    Returns a _Raster aligned with *one*, or None when the footprints do not
    intersect or the crop produced an empty raster.
    """
    # Crop and reproject the second raster, if necessary.
    if not (one.crs == other.crs and one.affine.almost_equals(other.affine) and one.shape == other.shape):
        if one.footprint().intersects(other.footprint()):
            if crop:
                if one.crs != other.crs:
                    # Different CRSs: crop in *other*'s CRS using *one*'s
                    # bounds plus a safety buffer (percentage of the larger
                    # side; overridable via TELLURIC_MERGE_CROP_BUFFER,
                    # default 10).
                    src_bounds = one.footprint().get_bounds(other.crs)
                    src_vector = GeoVector(Polygon.from_bounds(*src_bounds), other.crs)
                    src_width, src_height = (
                        src_bounds.right - src_bounds.left,
                        src_bounds.top - src_bounds.bottom)
                    buffer_ratio = int(os.environ.get("TELLURIC_MERGE_CROP_BUFFER", 10))
                    buffer_size = max(src_width, src_height) * (buffer_ratio / 100)
                    other = other.crop(src_vector.buffer(buffer_size))
                else:
                    other = other.crop(one.footprint(), resolution=one.resolution())

                # The crop may leave nothing usable behind.
                if other.height == 0 or other.width == 0:
                    return None

            other = other._reproject(new_width=one.width, new_height=one.height,
                                     dest_affine=one.affine, dst_crs=one.crs,
                                     resampling=resampling)

        else:
            return None

    return _Raster(image=other.image, band_names=other.band_names)
def _fill_pixels(one, other):
    # type: (_Raster, _Raster) -> _Raster
    """Merges two single band rasters with the same band by filling the pixels according to depth.

    Pixels already unmasked in *one* win; masked pixels of *one* are filled
    from *other* wherever *other* holds valid (unmasked) data.
    """
    assert len(one.band_names) == len(other.band_names) == 1, "Rasters are not single band"

    # We raise an error in the intersection is empty.
    # Other options include returning an "empty" raster or just None.
    # The problem with the former is that GeoRaster2 expects a 2D or 3D
    # numpy array, so there is no obvious way to signal that this raster
    # has no bands. Also, returning a (1, 1, 0) numpy array is useless
    # for future concatenation, so the expected shape should be used
    # instead. The problem with the latter is that it breaks concatenation
    # anyway and requires special attention. Suggestions welcome.
    if one.band_names != other.band_names:
        raise ValueError("rasters have no bands in common, use another merge strategy")

    new_image = one.image.copy()
    other_image = other.image

    # The values that I want to mask are the ones that:
    # * Were already masked in the other array, _or_
    # * Were already unmasked in the one array, so I don't overwrite them
    other_values_mask = (np.ma.getmaskarray(other_image)[0] | (~np.ma.getmaskarray(one.image)[0]))

    # Reshape the mask to fit the future array
    other_values_mask = other_values_mask[None, ...]

    # Overwrite the values that I don't want to mask
    new_image[~other_values_mask] = other_image[~other_values_mask]

    # In other words, the values that I wanted to write are the ones that:
    # * Were already masked in the one array, _and_
    # * Were not masked in the other array
    # The reason for using the inverted form is to retain the semantics
    # of "masked=True" that apply for masked arrays. The same logic
    # could be written, using the De Morgan's laws, as
    # other_values_mask = (one.image.mask[0] & (~other_image.mask[0])
    # other_values_mask = other_values_mask[None, ...]
    # new_image[other_values_mask] = other_image[other_values_mask]
    # but here the word "mask" does not mean the same as in masked arrays.
    return _Raster(image=new_image, band_names=one.band_names)
def _stack_bands(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two rasters with non overlapping bands by stacking the bands.
"""
assert set(one.band_names).intersection(set(other.band_names)) == set()
# We raise an error in the bands are the same. See above.
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
# Apply "or" to the mask in the same way rasterio does, see
# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks
# In other words, mask the values that are already masked in either
# of the two rasters, since one mask per band is not supported
new_mask = np.ma.getmaskarray(one.image)[0] | | np.ma.getmaskarray(other.image) | numpy.ma.getmaskarray |
import os
import numpy as np
import scipy.constants as ct
from scipy.io import readsav as rsav
from .load_quantities import *
from .load_arithmetic_quantities import *
from .tools import *
from .load_noeos_quantities import *
from . import document_vars
class Matsumotosav:
"""
Class to read Matsumoto's sav file atmosphere.
Snapshots from a MHD simulation ( Matsumoto 2018 )
https://ui.adsabs.harvard.edu/abs/2018MNRAS.476.3328M/abstract
Parameters
----------
fdir : str, optional
Directory with snapshots.
rootname : str
Template for snapshot number.
it : integer
Snapshot number to read. By default reads the loaded snapshot;
if a different number is requested, will load that snapshot.
verbose : bool, optional
If True, will print more information.
"""
def __init__(self, rootname, snap, fdir='.', sel_units='cgs', verbose=True):
    """Load one snapshot of a Matsumoto (2018) MHD simulation from an IDL sav file.

    Parameters
    ----------
    rootname : str
        Template for the snapshot file name; the snapshot number is
        appended as a zero-padded 6-digit suffix.
    snap : int
        Snapshot number to read.
    fdir : str, optional
        Directory with snapshots.
    sel_units : str, optional
        'cgs' keeps axis coordinates in cm; otherwise they are converted
        to Mm (divided by 1e8).
    verbose : bool, optional
        If True, will print more information.
    """
    self.fdir = fdir
    self.rootname = rootname
    # All simulation variables live under the 'v' record of the sav file.
    self.savefile = rsav(os.path.join(fdir, rootname + '{:06d}'.format(snap) + '.sav'))
    self.snap = snap
    self.sel_units = sel_units
    self.verbose = verbose
    self.uni = Matsumotosav_units()

    # Scalar snapshot attributes: time, gravity (gx) and adiabatic index (gm).
    self.time = self.savefile['v']['time'][0].copy()
    self.grav = self.savefile['v']['gx'][0].copy()
    self.gamma = self.savefile['v']['gm'][0].copy()

    if self.sel_units == 'cgs':
        self.x = self.savefile['v']['x'][0].copy()  # cm
        self.y = self.savefile['v']['y'][0].copy()
        self.z = self.savefile['v']['z'][0].copy()

        self.dx = self.savefile['v']['dx'][0].copy()
        self.dy = self.savefile['v']['dy'][0].copy()
        self.dz = self.savefile['v']['dz'][0].copy()
    else:
        self.x = self.savefile['v']['x'][0].copy() / 1e8  # Mm
        self.y = self.savefile['v']['y'][0].copy() / 1e8
        self.z = self.savefile['v']['z'][0].copy() / 1e8

        self.dx = self.savefile['v']['dx'][0].copy() / 1e8
        self.dy = self.savefile['v']['dy'][0].copy() / 1e8
        self.dz = self.savefile['v']['dz'][0].copy() / 1e8

    self.nx = len(self.x)
    self.ny = len(self.y)
    self.nz = len(self.z)

    # Per-axis grid spacing; degenerate (single-point) axes get zeros
    # because np.gradient needs at least two samples.
    if self.nx > 1:
        self.dx1d = np.gradient(self.x)
    else:
        self.dx1d = np.zeros(self.nx)

    if self.ny > 1:
        self.dy1d = np.gradient(self.y)
    else:
        self.dy1d = np.zeros(self.ny)

    if self.nz > 1:
        self.dz1d = np.gradient(self.z)
    else:
        self.dz1d = np.zeros(self.nz)

    self.transunits = False

    self.cstagop = False  # This will not allow to use cstagger from Bifrost in load
    self.hion = False  # This will not allow to use HION from Bifrost in load

    # Build the variable-name mapping and the variable documentation.
    self.genvar()

    document_vars.create_vardict(self)
    document_vars.set_vardocs(self)
def get_var(self,var , *args, snap=None, iix=None, iiy=None, iiz=None, layout=None, **kargs):
'''
Reads the variables from a snapshot (snap).
Parameters
----------
var - string
Name of the variable to read. Must be Bifrost internal names.
snap - integer, optional
Snapshot number to read. By default reads the loaded snapshot;
if a different number is requested, will load that snapshot.
Axes:
-----
y and z axes horizontal plane
x-axis is vertical axis, top corona is first index and negative.
Variable list:
--------------
ro -- Density (g/cm^3) [nx, ny, nz]
temperature -- Temperature (K) [nx, ny, nz]
vx -- component x of the velocity (cm/s) [nx, ny, nz]
vy -- component y of the velocity (cm/s) [nx, ny, nz]
vz -- component z of the velocity (cm/s) [nx, ny, nz]
bx -- component x of the magnetic field (G) [nx, ny, nz]
by -- component y of the magnetic field (G) [nx, ny, nz]
bz -- component z of the magnetic field (G) [nx, ny, nz]
pressure -- Pressure (dyn/cm^2) [nx, ny, nz]
'''
if snap != None:
self.snap = snap
self.savefile = rsav(os.path.join(self.fdir,self.rootname+'{:06d}'.format(self.snap)+'.sav'))
if var in self.varn.keys():
varname=self.varn[var]
else:
varname=var
try:
if self.sel_units == 'cgs':
varu=var.replace('x','')
varu=varu.replace('y','')
varu=varu.replace('z','')
if (var in self.varn.keys()) and (varu in self.uni.uni.keys()):
cgsunits = self.uni.uni[varu]
else:
cgsunits = 1.0
else:
cgsunits = 1.0
self.data = self.savefile['v'][varname][0].T * cgsunits
'''
if (np.shape(self.data)[0]>self.nx):
self.data = (self.data[1:,:,:] + self.data[:-1,:,:]) / 2
if (np.shape(self.data)[1]>self.ny):
self.data = (self.data[:,1:,:] + self.data[:,:-1,:]) / 2
if (np.shape(self.data)[2]>self.nz):
self.data = (self.data[:,:,1:] + self.data[:,:,:-1]) / 2
'''
except:
# Loading quantities
if self.verbose:
print('Loading composite variable',end="\r",flush=True)
self.data = load_noeos_quantities(self,var, **kargs)
if np.shape(self.data) == ():
self.data = load_quantities(self,var,PLASMA_QUANT='', CYCL_RES='',
COLFRE_QUANT='', COLFRI_QUANT='', IONP_QUANT='',
EOSTAB_QUANT='', TAU_QUANT='', DEBYE_LN_QUANT='',
CROSTAB_QUANT='', COULOMB_COL_QUANT='', AMB_QUANT='',
HALL_QUANT='', BATTERY_QUANT='', SPITZER_QUANT='',
KAPPA_QUANT='', GYROF_QUANT='', WAVE_QUANT='',
FLUX_QUANT='', CURRENT_QUANT='', COLCOU_QUANT='',
COLCOUMS_QUANT='', COLFREMX_QUANT='', **kargs)
# Loading arithmetic quantities
if np.shape(self.data) == ():
if self.verbose:
print('Loading arithmetic variable',end="\r",flush=True)
self.data = load_arithmetic_quantities(self,var, **kargs)
if document_vars.creating_vardict(self):
return None
elif var == '':
print(help(self.get_var))
print('VARIABLES USING CGS OR GENERIC NOMENCLATURE')
for ii in self.varn:
print('use ', ii,' for ',self.varn[ii])
if hasattr(self,'vardict'):
self.vardocs()
return None
return self.data
def genvar(self):
'''
Dictionary of original variables which will allow to convert to cgs.
'''
self.varn={}
self.varn['rho']= 'ro'
self.varn['tg'] = 'te'
self.varn['pg'] = 'pr'
self.varn['ux'] = 'vx'
self.varn['uy'] = 'vy'
self.varn['uz'] = 'vz'
self.varn['bx'] = 'bx'
self.varn['by'] = 'by'
self.varn['bz'] = 'bz'
def trans2comm(self,varname,snap=None):
'''
Transform the domain into a "common" format. All arrays will be 3D. The 3rd axis
is:
- for 3D atmospheres: the vertical axis
- for loop type atmospheres: along the loop
- for 1D atmosphere: the unique dimension is the 3rd axis.
At least one extra dimension needs to be created artifically.
All of them should obey the right hand rule
In all of them, the vectors (velocity, magnetic field etc) away from the Sun.
If applies, z=0 near the photosphere.
Units: everything is in cgs.
If an array is reverse, do ndarray.copy(), otherwise pytorch will complain.
'''
self.sel_units = 'cgs'
if varname[-1] in ['x','y','z']:
if varname[-1] == 'x':
varname=varname.replace(varname[len(varname)-1], 'y')
elif varname[-1] == 'y':
varname=varname.replace(varname[len(varname)-1], 'z')
else:
varname=varname.replace(varname[len(varname)-1], 'x')
self.order = np.array((1,2,0))
self.trans2commaxes()
return np.transpose(self.get_var(varname,snap=snap),
self.order).copy()
def trans2commaxes(self):
if self.transunits == False:
# including units conversion
axisarrs= np.array(((self.x),(self.y),(self.z)))
daxisarrs= np.array(((self.dx),(self.dy),(self.dz)))
self.x = axisarrs[self.order[0]].copy()
self.y = axisarrs[self.order[1]].copy()
self.z = axisarrs[self.order[2]].copy() + np.max(np.abs(axisarrs[self.order[2]]))
self.dx = daxisarrs[self.order[0]].copy()
self.dy = daxisarrs[self.order[1]].copy()
self.dz = -axisarrs[self.order[2]].copy()
self.dx1d, self.dy1d, self.dz1d = np.gradient(self.x).copy(), | np.gradient(self.y) | numpy.gradient |
# modified from original source by <NAME>
# source: https://github.com/leoxiaobin/deep-high-resolution-net.pytorch/blob/master/demo/inference.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import shutil
import json
from PIL import Image
from pycocotools.coco import COCO
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import cv2
import numpy as np
print("We are using torch version", torch.__version__)
print("We are using torchvision version", torchvision.__version__)
import sys
sys.path.append("./deep-high-resolution-net.pytorch/lib")
import time
from models import pose_hrnet
from config import cfg
from config import update_config
from core.inference import get_final_preds
from utils.transforms import get_affine_transform
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
import numpy as np
import cv2
import os
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
_BLACK = (0, 0, 0)
_RED = (0, 0, 255)
_BLUE = (255, 0, 0)
_PURPLE = (204, 0, 153)
_ORANGE = (51, 153, 255)
_LBROWN = (0, 153, 230)
keypoint_colors = { '1': _RED, '2': _RED, '3': _RED, '4': _RED, '5': _RED,
'6': _ORANGE, '7': _ORANGE, '8': _ORANGE, '9': _ORANGE,
'10': _LBROWN, '11': _LBROWN, '12': _LBROWN, '13': _LBROWN,
'14': _BLUE, '15': _BLUE, '16': _BLUE, '17': _BLUE,
'18': _PURPLE, '19': _PURPLE, '20': _PURPLE, '21': _PURPLE
}
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'hand',
]
def get_person_detection_boxes(model, img, threshold=0.5):
    """Run the detector on `img` and return the boxes of all 'hand' detections
    scoring above `threshold`, each as [(x1, y1), (x2, y2)]."""
    pil_image = Image.fromarray(img)
    to_tensor = transforms.Compose([transforms.ToTensor()])
    # The detector takes a list of CHW tensors and returns one dict per image.
    pred = model([to_tensor(pil_image).to(CTX)])
    labels = list(pred[0]['labels'].cpu().numpy())
    boxes = list(pred[0]['boxes'].cpu().detach().numpy())
    scores = list(pred[0]['scores'].cpu().detach().numpy())

    person_boxes = []
    for label_, box_, score_ in zip(labels, boxes, scores):
        # keep only confident 'hand' detections
        if score_ > threshold and COCO_INSTANCE_CATEGORY_NAMES[label_] == 'hand':
            person_boxes.append([(box_[0], box_[1]), (box_[2], box_[3])])
    return person_boxes
def get_pose_estimation_prediction(pose_model, image, centers, scales, transform):
rotation = 0
# pose estimation transformation
model_inputs = []
for center, scale in zip(centers, scales):
trans = get_affine_transform(center, scale, rotation, cfg.MODEL.IMAGE_SIZE)
# Crop smaller image of people
model_input = cv2.warpAffine(
image,
trans,
(int(cfg.MODEL.IMAGE_SIZE[0]), int(cfg.MODEL.IMAGE_SIZE[1])),
flags=cv2.INTER_LINEAR)
# hwc -> 1chw
model_input = transform(model_input)#.unsqueeze(0)
model_inputs.append(model_input)
# n * 1chw -> nchw
model_inputs = torch.stack(model_inputs)
# compute output heatmap
output = pose_model(model_inputs.to(CTX))
coords, _ = get_final_preds(
cfg,
output.cpu().detach().numpy(),
np.asarray(centers),
| np.asarray(scales) | numpy.asarray |
from mechanisms import Sequential, Parallel
from utils import create_agents, create_items, create_auctions, correct_values
import numpy as np
if __name__ == '__main__':
PARALLEL = False
COMMON = False
N_RUNS = 1000
QUANTILE = 0.005
STEP_SHARE = 0.05
VERBOSE = False
prices, regrets = [], []
for i in range(N_RUNS):
items = create_items(STEP_SHARE)
agents = create_agents()
### agents = {k: v.modify_capacities(0) for k, v in agents.items()}
### agents = {k: v.modify_values(0) for k, v in agents.items()}
agents = correct_values(agents, items, step_share=STEP_SHARE)
### items = {k: v.modify_reserve_price(0) for k, v in items.items()}
auctions = create_auctions(agents, items, COMMON)
auctions = {k: v.check_agents() for k, v in auctions.items()}
mechanism = Parallel(agents, items, auctions) if PARALLEL else Sequential(agents, items, auctions)
price = mechanism.run(VERBOSE)
prices.append(price)
regret = mechanism.get_regret()
regrets.append(regret)
low_price, avg_price, high_price = np.quantile(prices, QUANTILE), np.mean(prices), np.quantile(prices, 1 - QUANTILE)
low_price, avg_price, high_price = round(low_price/1000, 1), round(avg_price/1000, 1), round(high_price/1000, 1)
print('parallel' if PARALLEL else 'sequential', 'common:' if COMMON else 'individual',
'\nprice', avg_price, 'bln rub, ci [', low_price, ',', high_price, '] bln rub')
low_rgt, avg_rgt, high_rgt = np.quantile(regrets, QUANTILE), | np.mean(regrets) | numpy.mean |
import os
from math import radians
import numpy as np
from sklearn.metrics import pairwise_distances
import trackintel as ti
from trackintel.geogr.distances import meters_to_decimal_degrees, calculate_distance_matrix
class TestCalculate_distance_matrix:
    def test_shape_for_different_array_length(self):
        """Distance matrix between 5 and 10 staypoints must be (5, 10), and
        swapping X/Y must give the exact transpose, for both metrics."""
        spts = ti.read_staypoints_csv(os.path.join('tests', 'data', 'geolife', 'geolife_staypoints.csv'))
        x = spts.iloc[0:5]
        y = spts.iloc[5:15]
        d_euc1 = calculate_distance_matrix(X=x, Y=y, dist_metric='euclidean')
        d_euc2 = calculate_distance_matrix(X=y, Y=x, dist_metric='euclidean')
        d_hav1 = calculate_distance_matrix(X=x, Y=y, dist_metric='haversine')
        d_hav2 = calculate_distance_matrix(X=y, Y=x, dist_metric='haversine')
        # shapes follow (len(X), len(Y))
        assert d_euc1.shape == d_hav1.shape == (5, 10)
        assert d_euc2.shape == d_hav2.shape == (10, 5)
        # the two orderings are transposes of each other
        assert np.isclose(0, np.sum(np.abs(d_euc1 - d_euc2.T)))
        assert np.isclose(0, np.sum(np.abs(d_hav1 - d_hav2.T)))
def test_keyword_combinations(self):
spts = ti.read_staypoints_csv(os.path.join('tests', 'data', 'geolife', 'geolife_staypoints.csv'))
x = spts.iloc[0:5]
y = spts.iloc[5:15]
_ = calculate_distance_matrix(X=x, Y=y, dist_metric='euclidean', n_jobs=-1)
_ = calculate_distance_matrix(X=y, Y=x, dist_metric='haversine', n_jobs=-1)
d_mink1 = calculate_distance_matrix(X=x, Y=x, dist_metric='minkowski', p=1)
d_mink2 = calculate_distance_matrix(X=x, Y=x, dist_metric='minkowski', p=2)
d_euc = calculate_distance_matrix(X=x, Y=x, dist_metric='euclidean')
assert not | np.array_equal(d_mink1,d_mink2) | numpy.array_equal |
import itertools
import logging
import math
import random
from typing import Tuple, Optional
import click
import numpy as np
from shapely.affinity import rotate, translate
from shapely.geometry import MultiLineString, Polygon
from shapely.ops import unary_union
import vpype as vp
RectType = Tuple[float, float, float, float]
def rect_to_polygon(rect: RectType) -> Polygon:
    """Build a shapely Polygon from an (x, y, width, height) rectangle."""
    x0, y0, w, h = rect[0], rect[1], rect[2], rect[3]
    corners = [
        (x0, y0),
        (x0 + w, y0),
        (x0 + w, y0 + h),
        (x0, y0 + h),
    ]
    return Polygon(corners)
def generate_fill(rect: RectType, pen_width: float) -> vp.LineCollection:
    """Fill a rectangle with horizontal back-and-forth pen strokes spaced by
    pen_width, vertically centered, returned as one connected polyline."""
    line_count = math.ceil(rect[3] / pen_width)
    # x extent of every stroke, inset half a pen width on both sides
    base_seg = np.array([pen_width / 2, rect[2] - pen_width / 2]) + rect[0]
    # center the stroke stack inside the rectangle height
    y_start = rect[1] + (rect[3] - (line_count - 1) * pen_width) / 2

    segs = []
    for n in range(line_count):
        seg = base_seg + (y_start + pen_width * n) * 1j
        # reverse every other stroke so consecutive strokes connect end-to-end
        segs.append(seg if n % 2 == 0 else np.flip(seg))
    return vp.LineCollection([np.hstack(segs)])
def generate_gradient(
    rect: RectType, line: np.ndarray, density: float = 1.0
) -> vp.LineCollection:
    """Generate a random dots with a gradient density distribution. `density` is global average
    number of point per square pixel
    """
    # expected number of dots for the rectangle area
    n = int((rect[2] * rect[3]) * density)
    # complex positions: uniform in x, triangular in y (mode at the top edge),
    # which produces the density gradient along y
    orig = np.random.uniform(rect[0], rect[0] + rect[2], n) + 1j * np.random.triangular(
        rect[1], rect[1], rect[1] + rect[3], n
    )
    # stamp the same short `line` (the "dot" geometry) at every random position
    lines = orig.reshape(n, 1) + line.reshape(1, len(line))
    return vp.LineCollection(lines)
def generate_dot_gradient(
rect: RectType, pen_width: float, density: float = 1.0
) -> vp.LineCollection:
"""Generate a random dots with a gradient density distribution. `density` is global average
number of point per square pixel
"""
return generate_gradient(rect, | np.array([0, pen_width * 0.05]) | numpy.array |
#!/usr/bin/env python
# Copyright 2021
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import ttest_ind
import netCDF4 as nc
import pickle
import os
from PIL import Image as PIL_Image
import sys
import shutil
import glob
import datetime
import time
import calendar
from numpy import genfromtxt
from scipy.optimize import curve_fit
from scipy.cluster.vq import kmeans,vq
from scipy.interpolate import interpn, interp1d
from math import e as e_constant
import math
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.collections import LineCollection
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib
import warnings
warnings.filterwarnings("ignore")
plt.style.use('classic')
# font size
# font_size = 14
# matplotlib.rc('font', **{'family': 'serif', 'serif': ['Arial'], 'size': font_size})
# matplotlib.rc('font', weight='bold')
p_progress_writing = False
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
time_format = '%d-%m-%Y_%H:%M'
time_format_khan = '%Y%m%d.0%H'
time_format_mod = '%Y-%m-%d_%H:%M:%S'
time_format_twolines = '%H:%M\n%d-%m-%Y'
time_format_twolines_noYear_noMin_intMonth = '%H\n%d-%m'
time_format_twolines_noYear = '%H:%M\n%d-%b'
time_format_twolines_noYear_noMin = '%H\n%d-%b'
time_format_date = '%Y-%m-%d'
time_format_time = '%H:%M:%S'
time_format_parsivel = '%Y%m%d%H%M'
time_format_parsivel_seconds = '%Y%m%d%H%M%S'
time_str_formats = [
time_format,
time_format_mod,
time_format_twolines,
time_format_twolines_noYear,
time_format_date,
time_format_time,
time_format_parsivel
]
default_cm = cm.jet
cm_vir = cm.viridis
listed_cm_colors_list = ['silver', 'red', 'green', 'yellow', 'blue', 'black']
listed_cm = ListedColormap(listed_cm_colors_list, 'indexed')
colorbar_tick_labels_list_cloud_phase = ['Clear', 'Water', 'SLW', 'Mixed', 'Ice', 'Unknown']
listed_cm_colors_list_cloud_phase = ['white', 'red', 'green', 'yellow', 'blue', 'purple']
listed_cm_cloud_phase = ListedColormap(listed_cm_colors_list_cloud_phase, 'indexed')
avogadros_ = 6.022140857E+23 # molecules/mol
gas_const = 83144.598 # cm3 mbar k-1 mol-1
gas_const_2 = 8.3144621 # J mol-1 K-1
gas_const_water = 461 # J kg-1 K-1
gas_const_dry = 287 # J kg-1 K-1
boltzmann_ = gas_const / avogadros_ # cm3 mbar / k molecules
gravity_ = 9.80665 # m/s
poisson_ = 2/7 # for dry air (k)
latent_heat_v = 2.501E+6 # J/kg
latent_heat_f = 3.337E+5 # J/kg
latent_heat_s = 2.834E+6 # J/kg
heat_capacity__Cp = 1005.7 # J kg-1 K-1 dry air
heat_capacity__Cv = 719 # J kg-1 K-1 water vapor
Rs_da = 287.05 # Specific gas const for dry air, J kg^{-1} K^{-1}
Rs_v = 461.51 # Specific gas const for water vapour, J kg^{-1} K^{-1}
Cp_da = 1004.6 # Specific heat at constant pressure for dry air
Cv_da = 719. # Specific heat at constant volume for dry air
Cp_v = 1870. # Specific heat at constant pressure for water vapour
Cv_v = 1410. # Specific heat at constant volume for water vapour
Cp_lw = 4218 # Specific heat at constant pressure for liquid water
Epsilon = 0.622 # Epsilon=Rs_da/Rs_v; The ratio of the gas constants
degCtoK = 273.15 # Temperature offset between K and C (deg C)
rho_w = 1000. # Liquid Water density kg m^{-3}
grav = 9.80665 # Gravity, m s^{-2}
Lv = 2.5e6 # Latent Heat of vaporisation
boltzmann = 5.67e-8 # Stefan-Boltzmann constant
mv = 18.0153e-3 # Mean molar mass of water vapor(kg/mol)
m_a = 28.9644e-3 # Mean molar mass of air(kg/mol)
Rstar_a = 8.31432 # Universal gas constant for air (N m /(mol K))
path_output = '/g/data/k10/la6753/'
# Misc
class Object_create(object):
    """Bare namespace object; callers attach arbitrary attributes dynamically."""
    pass
def list_files_recursive(path_, filter_str=None):
    """Walk `path_` recursively and return the full path of every file,
    optionally keeping only filenames that contain `filter_str`."""
    file_list = []
    # r=root, d=directories, f = files
    for root_, _dirs, names_ in os.walk(path_):
        if filter_str is None:
            file_list.extend(os.path.join(root_, name_) for name_ in names_)
        else:
            file_list.extend(os.path.join(root_, name_)
                             for name_ in names_ if filter_str in name_)
    return file_list
def list_files(path_, filter_str='*'):
    """Return the sorted list of paths matching the glob pattern path_ + filter_str."""
    pattern = str(path_ + filter_str)
    return sorted(glob.glob(pattern))
def coincidence(arr_1, arr_2):
    """Return the entries of both arrays only at positions where BOTH series
    hold valid (non-NaN) values."""
    # a NaN in either input propagates through the product, so the product's
    # NaN pattern marks every position to drop
    keep = ~np.isnan(arr_1 * arr_2)
    return arr_1[keep], arr_2[keep]
def array_2d_fill_gaps_by_interpolation_linear(array_):
    """
    Fill NaN gaps in a 2D array by linear interpolation.

    Rows and columns containing NaNs are interpolated independently along each
    axis (using np.interp); the filled value is the mean of the row-wise and
    column-wise interpolations. Cells that already held data are returned
    unchanged (a NaN cell makes both its row-sum and column-sum NaN, so both
    directions always contribute at every gap).
    """
    rows_ = array_.shape[0]
    cols_ = array_.shape[1]
    output_array_X = np.zeros((rows_, cols_), dtype=float)  # row-wise (X) interpolations
    output_array_Y = np.zeros((rows_, cols_), dtype=float)  # column-wise (Y) interpolations
    row_sum = np.sum(array_, axis=1)  # NaN here flags a row with gaps
    col_index = np.arange(array_.shape[1])
    col_sum = np.sum(array_, axis=0)  # NaN here flags a column with gaps
    row_index = np.arange(array_.shape[0])
    for r_ in range(array_.shape[0]):
        if row_sum[r_] != row_sum[r_]:  # NaN != NaN: row contains at least one gap
            # get X direction interpolation
            coin_out = coincidence(col_index, array_[r_, :])
            output_array_X[r_, :][np.isnan(array_[r_, :])] = np.interp(
                col_index[np.isnan(array_[r_, :])], coin_out[0], coin_out[1])
    for c_ in range(array_.shape[1]):
        if col_sum[c_] != col_sum[c_]:  # NaN != NaN: column contains at least one gap
            # get Y direction interpolation
            coin_out = coincidence(row_index, array_[:, c_])
            output_array_Y[:, c_][np.isnan(array_[:, c_])] = np.interp(
                row_index[np.isnan(array_[:, c_])], coin_out[0], coin_out[1])
    # zero out the gaps in the original, then add the averaged interpolations
    output_array = np.array(array_)
    output_array[np.isnan(array_)] = 0
    return output_array + ((output_array_X + output_array_Y)/2)
def array_2d_fill_gaps_by_interpolation_cubic(array_):
    """
    Fill NaN gaps in a 2D array by cubic interpolation along rows and columns,
    returning the original values plus the mean of both 1D interpolations at
    each gap.

    NOTE(review): interp1d(kind='cubic') needs at least 4 valid samples per
    interpolated row/column and raises for gaps outside the valid range --
    confirm inputs satisfy this.
    """
    rows_ = array_.shape[0]
    cols_ = array_.shape[1]
    output_array_X = np.zeros((rows_, cols_), dtype=float)  # row-wise (X) interpolations
    output_array_Y = np.zeros((rows_, cols_), dtype=float)  # column-wise (Y) interpolations
    row_sum = np.sum(array_, axis=1)  # NaN here flags a row with gaps
    col_index = np.arange(array_.shape[1])
    col_sum = np.sum(array_, axis=0)  # NaN here flags a column with gaps
    row_index = np.arange(array_.shape[0])
    for r_ in range(array_.shape[0]):
        if row_sum[r_] != row_sum[r_]:  # NaN != NaN: row contains at least one gap
            # get X direction interpolation
            coin_out = coincidence(col_index, array_[r_, :])
            interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
            output_array_X[r_, :][np.isnan(array_[r_, :])] = interp_function(col_index[np.isnan(array_[r_, :])])
    for c_ in range(array_.shape[1]):
        if col_sum[c_] != col_sum[c_]:  # NaN != NaN: column contains at least one gap
            # get Y direction interpolation
            coin_out = coincidence(row_index, array_[:, c_])
            interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
            output_array_Y[:, c_][np.isnan(array_[:, c_])] = interp_function(row_index[np.isnan(array_[:, c_])])
    # zero out the gaps in the original, then add the averaged interpolations
    output_array = np.array(array_)
    output_array[np.isnan(array_)] = 0
    return output_array + ((output_array_X + output_array_Y)/2)
def combine_2_time_series(time_1_reference, data_1, time_2, data_2,
                          forced_time_step=None, forced_start_time=None, forced_stop_time=None,
                          cumulative_var_1=False, cumulative_var_2=False):
    """
    takes two data sets with respective time series, and outputs the coincident stamps from both data sets
    It does this by using mean_discrete() for both sets with the same start stamp and averaging time, the averaging
    time is the forced_time_step (mean_discrete is defined elsewhere in this module).
    :param time_1_reference: 1D array, same units as time_2, this series will define the returned time step reference
    :param data_1: can be 1D or 2D array, first dimention most be same as time_1
    :param time_2: 1D array, same units as time_1
    :param data_2: can be 1D or 2D array, first dimention most be same as time_2
    :param forced_time_step: if none, the median of the differential of time_1_reference will be used
    :param forced_start_time: if not none, the returned series will start at this time stamp
    :param forced_stop_time: if not none, the returned series will stop at this time stamp
    :param cumulative_var_1: True is you want the variable to be accumulated instead of means, only of 1D data
    :param cumulative_var_2: True is you want the variable to be accumulated instead of means, only of 1D data
    :return: Index_averaged_1: 1D array, smallest coincident time, without time stamp gaps
    :return: Values_mean_1: same shape as data_1 both according to Index_averaged_1 times
    :return: Values_mean_2: same shape as data_2 both according to Index_averaged_1 times
    """
    # define forced_time_step
    if forced_time_step is None:
        forced_time_step = np.median(np.diff(time_1_reference))
    # find time period common to both series (unless forced)
    if forced_start_time is None:
        first_time_stamp = max(np.nanmin(time_1_reference), np.nanmin(time_2))
    else:
        first_time_stamp = forced_start_time
    if forced_stop_time is None:
        last_time_stamp = min(np.nanmax(time_1_reference), np.nanmax(time_2))
    else:
        last_time_stamp = forced_stop_time
    # do the averaging
    print('starting averaging of data 1')
    if cumulative_var_1:
        Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp,
                                                        cumulative_parameter_indx=0)
    else:
        Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp)
    print('starting averaging of data 2')
    if cumulative_var_2:
        Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp,
                                                        cumulative_parameter_indx=0)
    else:
        Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp)
    # check that averaged indexes are the same
    if np.nansum(np.abs(Index_averaged_1 - Index_averaged_2)) != 0:
        print('error during averaging of series, times do no match ????')
        return None, None, None
    # return the combined, trimmed data
    return Index_averaged_1, Values_mean_1, Values_mean_2
def split_str_chunks(s, n):
    """Produce `n`-character chunks from `s` (last chunk may be shorter)."""
    return [s[start:start + n] for start in range(0, len(s), n)]
def coincidence_multi(array_list):
    """
    Trim a list of equally-shaped 1D arrays to their coincident samples.

    A sample index is kept only when EVERY array holds a non-NaN value there.
    Returns a list of numpy arrays (same order as the input) containing only
    those coincident samples.

    Fix: the previous version built an intermediate `new_arr_list` that was
    never used, and kept multiplying the mask by each parameter inside that
    loop; both the dead code and the confusing mutation are removed. The
    returned values are unchanged.
    """
    # the product of all series is NaN wherever any single series is NaN
    check_ = array_list[0]
    for param_ in array_list[1:]:
        check_ = check_ * param_
    keep = check_ == check_  # NaN != NaN, so this is the "all valid" mask
    return [np.asarray(param_)[keep] for param_ in array_list]
def coincidence_zero(arr_1, arr_2):
    """Return the entries of both arrays wherever their product is non-zero.

    NaN entries survive the filter because NaN != 0 evaluates True.
    """
    keep = (arr_1 * arr_2) != 0
    return arr_1[keep], arr_2[keep]
def discriminate(X_, Y_, Z_, value_disc_list, discrmnt_invert_bin = False):
    """Return (X_new, Y_new) with entries NaN-masked according to whether Z_
    falls inside the band [value_disc_list[0], value_disc_list[1]]; with
    discrmnt_invert_bin=True the band is rejected instead of kept."""
    if discrmnt_invert_bin:
        # invert: drop samples inside the band
        Z_mask = np.ones(Z_.shape[0])
        Z_mask[Z_ > value_disc_list[0]] = np.nan
        # NOTE(review): this re-accepts Z_ >= value_disc_list[1]; the boundary
        # operators (> vs >=) are asymmetric with the keep-branch below --
        # confirm the edge behavior is intended.
        Z_mask[Z_ >= value_disc_list[1]] = 1
        Y_new = Y_ * Z_mask
        X_new = X_ * Z_mask
    else:
        # keep only samples inside the band
        Z_mask = np.ones(Z_.shape[0])
        Z_mask[Z_ < value_disc_list[0]] = np.nan
        Z_mask[Z_ > value_disc_list[1]] = np.nan
        Y_new = Y_ * Z_mask
        X_new = X_ * Z_mask
    return X_new, Y_new
def add_ratio_to_values(header_, values_, nominator_index, denominator_index, ratio_name, normalization_value=1.):
    """Append a normalized ratio column (nominator/denominator columns of
    `values_`) and its header name; returns (header_new, values_new)."""
    ratio_col = normalization_value * values_[:, nominator_index] / values_[:, denominator_index]
    header_new = np.append(header_, ratio_name)
    values_new = np.column_stack((values_, ratio_col))
    return header_new, values_new
def bin_data(x_val_org,y_val_org, start_bin_edge=0, bin_size=1, min_bin_population=1):
    """
    Bin y values into fixed-width x bins.

    Returns (bin_left_edges, y_binned) where y_binned is a list of lists: the
    y values whose x falls in [edge, edge + bin_size). Bins with fewer than
    min_bin_population samples are returned as empty lists. Edges are ints
    when bin_size >= 1.
    """
    # get coincidences only
    x_val,y_val = coincidence(x_val_org,y_val_org)
    # combine x and y in matrix
    M_ = np.column_stack((x_val,y_val))
    # checking if always ascending to increase efficiency
    always_ascending = 1
    for x in range(x_val.shape[0]-1):
        if x_val[x]==x_val[x] and x_val[x+1]==x_val[x+1]:  # NaN-safe comparison
            if x_val[x+1] < x_val[x]:
                always_ascending = 0
    if always_ascending == 0:
        M_sorted = M_[M_[:,0].argsort()] # sort by first column
        M_ = M_sorted
    # convert data to list of bins; `last_row` lets each bin resume the scan
    # where the previous bin stopped (data is sorted by x at this point)
    y_binned = []
    x_binned = []
    last_row = 0
    last_row_temp = last_row
    while start_bin_edge <= np.nanmax(x_val):
        y_val_list = []
        for row_ in range(last_row, M_.shape[0]):
            if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size:
                if M_[row_, 1] == M_[row_, 1]:  # keep only non-NaN y values
                    y_val_list.append(M_[row_, 1])
                    last_row_temp = row_
            if M_[row_, 0] >= start_bin_edge + bin_size:
                last_row_temp = row_
                break
        x_binned.append(start_bin_edge)
        if len(y_val_list) >= min_bin_population:
            y_binned.append(y_val_list)
        else:
            y_binned.append([])
        start_bin_edge += bin_size
        last_row = last_row_temp
    # add series
    if bin_size >= 1:
        x_binned_int = np.array(x_binned, dtype=int)
    else:
        x_binned_int = x_binned
    return x_binned_int, y_binned
def shiftedColorMap(cmap, midpoint=0.5, name='shiftedcmap'):
    """
    Return (and register with matplotlib) a copy of `cmap` whose center is
    moved to `midpoint` in [0, 1] -- e.g. to center a diverging colormap on an
    asymmetric data range.
    """
    cdict = {
        'red': [],
        'green': [],
        'blue': [],
        'alpha': []
    }
    # regular index to compute the colors
    reg_index = np.linspace(0, 1, 257)
    # shifted index to match the data
    shift_index = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True)
    ])
    # resample the original colormap onto the shifted index
    for ri, si in zip(reg_index, shift_index):
        r, g, b, a = cmap(ri)
        cdict['red'].append((si, r, r))
        cdict['green'].append((si, g, g))
        cdict['blue'].append((si, b, b))
        cdict['alpha'].append((si, a, a))
    newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
    # NOTE(review): plt.register_cmap is deprecated/removed in recent
    # matplotlib (use matplotlib.colormaps.register) -- confirm target version.
    plt.register_cmap(cmap=newcmap)
    return newcmap
def student_t_test(arr_1, arr_2):
    """Two-sample (independent) Student's t-test ignoring NaNs;
    returns (statistic, pvalue)."""
    result = ttest_ind(arr_1, arr_2, nan_policy='omit')
    return result
def k_means_clusters(array_, cluster_number, forced_centers=None):
    """Cluster the rows of array_ with k-means; returns (centers, labels).

    If forced_centers is given, no fitting is done -- rows are only assigned
    to the provided centers via vector quantization.
    """
    if forced_centers is not None:
        labels_, _dist = vq(array_, forced_centers)
        return forced_centers, labels_
    centers_, _distortion = kmeans(array_, cluster_number)
    labels_, _dist = vq(array_, centers_)
    return centers_, labels_
def grid_(x, y, z, resX=100, resY=100):
    """Convert 3-column scatter data to a regular matplotlib-style grid.

    Fix: matplotlib.mlab.griddata was removed in matplotlib 3.1, so the old
    body raises AttributeError on any modern install. This uses
    scipy.interpolate.griddata instead (linear interpolation; points outside
    the convex hull of the data become NaN).

    Returns X, Y meshgrids of shape (resY, resX) and the interpolated Z.
    """
    # local import so the module-level dependency list is unchanged
    from scipy.interpolate import griddata as _scipy_griddata
    xi = np.linspace(min(x), max(x), resX)
    yi = np.linspace(min(y), max(y), resY)
    X, Y = np.meshgrid(xi, yi)
    Z = _scipy_griddata((np.asarray(x), np.asarray(y)), np.asarray(z), (X, Y),
                        method='linear')
    return X, Y, Z
def find_max_index_2d_array(array_):
    """Return the (row, col) index of the largest element of a 2D array."""
    flat_pos = np.argmax(array_)
    return np.unravel_index(flat_pos, array_.shape)
def find_min_index_2d_array(array_):
    """Return the (row, col) index of the smallest element of a 2D array."""
    flat_pos = np.argmin(array_)
    return np.unravel_index(flat_pos, array_.shape)
def find_max_index_1d_array(array_):
    """Return the index of the largest element of a 1D array."""
    return np.argmax(array_)
def find_min_index_1d_array(array_):
    """Return the index of the smallest element of a 1D array."""
    return np.argmin(array_)
def time_series_interpolate_discrete(Index_, Values_, index_step, first_index,
                                     position_=0., last_index=None):
    """
    Interpolate Values_ onto a regular index grid starting at first_index with
    spacing index_step (despite the docstring of sibling functions, this one
    interpolates with np.interp -- it does not average).
    :param Index_: n by 1 numpy array to look for position,
    :param Values_: n by m numpy array, values to be interpolated
    :param index_step: in same units as Index_
    :param first_index: is the first discrete index on new arrays.
    :param position_: will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
    :param last_index: in case you want to force the returned series to some fixed period/length
    :return: Index_interp, Values_interp

    NOTE(review): the Values_mean array allocated below is never used, and
    np.interp only supports 1D Values_ -- the 2D branch appears dead/broken.
    Confirm before relying on 2D input.
    """
    # checking if always ascending to increase efficiency
    always_ascending = 1
    for x in range(Index_.shape[0]-1):
        if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:  # NaN-safe comparison
            if Index_[x+1] < Index_[x]:
                always_ascending = 0
    if always_ascending == 0:
        MM_ = np.column_stack((Index_,Values_))
        MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
        Index_ = MM_sorted[:,0]
        Values_ = MM_sorted[:,1:]
    # error checking!
    if Index_.shape[0] != Values_.shape[0]:
        print('error during shape check! Index_.shape[0] != Values_.shape[0]')
        return None, None
    if Index_[-1] < first_index:
        print('error during shape check! Index_[-1] < first_index')
        return None, None
    # initialize output matrices
    if last_index is None:
        final_index = np.nanmax(Index_)
    else:
        final_index = last_index
    total_averaged_rows = int((final_index-first_index) / index_step) + 1
    if len(Values_.shape) == 1:
        Values_mean = np.zeros(total_averaged_rows)
        Values_mean[:] = np.nan
    else:
        Values_mean = np.zeros((total_averaged_rows,Values_.shape[1]))
        Values_mean[:,:] = np.nan
    Index_interp = np.zeros(total_averaged_rows)
    for r_ in range(total_averaged_rows):
        Index_interp[r_] = first_index + (r_ * index_step)
    # shift the sampling positions by `position_`, interpolate, then shift back
    Index_interp -= (position_ * index_step)
    Values_interp = np.interp(Index_interp, Index_, Values_)
    Index_interp = Index_interp + (position_ * index_step)
    return Index_interp, Values_interp
def array_2D_sort_ascending_by_column(array_, column_=0):
    """Return the rows of array_ reordered so that column_ is ascending."""
    order_ = np.argsort(array_[:, column_])
    return array_[order_]
def get_ax_range(ax):
    """Return (x_min, x_max, y_min, y_max) of a matplotlib axes.

    ax.axis() with no arguments is a getter, so one call suffices.
    """
    x_1, x_2, y_1, y_2 = ax.axis()
    return x_1, x_2, y_1, y_2
def get_array_perimeter_only(array_):
    """Return the border cells of a 2D array, walked clockwise from the
    top-left corner (each corner appears exactly once)."""
    top_ = array_[0, :-1]          # top row, left to right (corner excluded)
    right_ = array_[:-1, -1]       # right column, top to bottom
    bottom_ = array_[-1, ::-1]     # bottom row, right to left
    left_ = array_[-2:0:-1, 0]     # left column, bottom to top (corners excluded)
    return np.concatenate([top_, right_, bottom_, left_])
# WRF
def wrf_var_search(wrf_nc_file, description_str):
description_str_lower = description_str.lower()
for var_ in sorted(wrf_nc_file.variables):
try:
if description_str_lower in wrf_nc_file.variables[var_].description.lower():
print(var_, '|', wrf_nc_file.variables[var_].description)
except:
pass
def create_virtual_sonde_from_wrf(sonde_dict, filelist_wrf_output,
                                  wrf_filename_time_format = 'wrfout_d03_%Y-%m-%d_%H_%M_%S'):
    """
    Follow a real radiosonde's trajectory through a series of WRF output files
    and build a 'virtual sonde' by sampling the model at the closest grid cell,
    closest output time, and closest pressure level for each sonde point.

    :param sonde_dict: dict with the real sonde's arrays; keys used: 'hght' [m ASL],
                       'pres' [hPa], 'time' [seconds since epoch], 'lati', 'long' [degrees]
    :param filelist_wrf_output: list of fullpath WRF output filenames (one per output time)
    :param wrf_filename_time_format: datetime format used to parse the time from each filename
    :return: dict with sonde-style keys ('hght', 'pres', 'temp', 'dwpt', 'sknt',
             'drct', 'relh', 'time', 'lati', 'long') sampled from the model

    NOTE(review): consecutive sonde points that map to the same model point
    (same cell, file and layer) are skipped entirely, so the output arrays can
    be shorter than the input profile — confirm this is intended.
    """
    # create time array
    # file times are encoded in the filenames themselves
    filelist_wrf_output_noPath = []
    for filename_ in filelist_wrf_output:
        filelist_wrf_output_noPath.append(filename_.split('/')[-1])
    wrf_time_file_list = np.array(time_str_to_seconds(filelist_wrf_output_noPath, wrf_filename_time_format))
    # create lat and lon arrays
    wrf_domain_file = nc.Dataset(filelist_wrf_output[0])
    # p(sorted(wrf_domain_file.variables))
    # wrf_vars = sorted(wrf_domain_file.variables)
    # for i_ in range(len(wrf_vars)):
    #     try:
    #         print(wrf_vars[i_], '\t\t', wrf_domain_file.variables[wrf_vars[i_]].description)
    #     except:
    #         print(wrf_vars[i_])
    # mass-grid coordinates plus the U- and V-staggered grids for the winds
    wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
    wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
    wrf_lat_U = wrf_domain_file.variables['XLAT_U'][0, :, :].filled(np.nan)
    wrf_lon_U = wrf_domain_file.variables['XLONG_U'][0, :, :].filled(np.nan)
    wrf_lat_V = wrf_domain_file.variables['XLAT_V'][0, :, :].filled(np.nan)
    wrf_lon_V = wrf_domain_file.variables['XLONG_V'][0, :, :].filled(np.nan)
    wrf_domain_file.close()
    # load sonde's profile
    sonde_hght = sonde_dict['hght']  # m ASL
    sonde_pres = sonde_dict['pres']  # hPa
    sonde_time = sonde_dict['time']  # seconds since epoc
    sonde_lati = sonde_dict['lati']  # degrees
    sonde_long = sonde_dict['long']  # degrees
    # create output lists of virtual sonde
    list_p__ = []
    list_hgh = []
    list_th_ = []
    list_th0 = []
    list_qv_ = []
    list_U__ = []
    list_V__ = []
    list_tim = []
    list_lat = []
    list_lon = []
    wrf_point_abs_address_old = 0
    # loop thru real sonde's points
    for t_ in range(sonde_hght.shape[0]):
        p_progress_bar(t_, sonde_hght.shape[0])
        point_hght = sonde_hght[t_]
        point_pres = sonde_pres[t_]
        point_time = sonde_time[t_]
        point_lati = sonde_lati[t_]
        point_long = sonde_long[t_]
        # find closest cell via lat, lon
        index_tuple = find_index_from_lat_lon_2D_arrays(wrf_lat,wrf_lon, point_lati,point_long)
        index_tuple_U = find_index_from_lat_lon_2D_arrays(wrf_lat_U,wrf_lon_U, point_lati,point_long)
        index_tuple_V = find_index_from_lat_lon_2D_arrays(wrf_lat_V,wrf_lon_V, point_lati,point_long)
        # find closest file via time
        file_index = time_to_row_sec(wrf_time_file_list, point_time)
        # open wrf file
        wrf_domain_file = nc.Dataset(filelist_wrf_output[file_index])
        # get pressure array from wrf
        # total pressure = base-state (PB) + perturbation (P), converted Pa -> hPa
        wrf_press = (wrf_domain_file.variables['PB'][0, :, index_tuple[0], index_tuple[1]].data +
                     wrf_domain_file.variables['P'][0, :, index_tuple[0], index_tuple[1]].data) / 100 # hPa
        # find closest model layer via pressure
        layer_index = find_min_index_1d_array(np.abs(wrf_press - point_pres))
        # define point absolute address and check if it is a new point
        wrf_point_abs_address_new = (index_tuple[0], index_tuple[1], file_index, layer_index)
        if wrf_point_abs_address_new != wrf_point_abs_address_old:
            wrf_point_abs_address_old = wrf_point_abs_address_new
            # get wrf data
            index_tuple_full = (0, layer_index, index_tuple[0], index_tuple[1])
            index_tuple_full_U = (0, layer_index, index_tuple_U[0], index_tuple_U[1])
            index_tuple_full_V = (0, layer_index, index_tuple_V[0], index_tuple_V[1])
            # save to arrays
            # note: height is taken from the real sonde, all other values from the model
            list_p__.append(float(wrf_press[layer_index]))
            list_hgh.append(float(point_hght))
            list_th_.append(float(wrf_domain_file.variables['T'][index_tuple_full]))
            list_th0.append(float(wrf_domain_file.variables['T00'][0]))
            list_qv_.append(float(wrf_domain_file.variables['QVAPOR'][index_tuple_full]))
            list_U__.append(float(wrf_domain_file.variables['U'][index_tuple_full_U]))
            list_V__.append(float(wrf_domain_file.variables['V'][index_tuple_full_V]))
            list_tim.append(float(wrf_time_file_list[file_index]))
            list_lat.append(float(wrf_lat[index_tuple[0], index_tuple[1]]))
            list_lon.append(float(wrf_lon[index_tuple[0], index_tuple[1]]))
        wrf_domain_file.close()
    # convert lists to arrays
    array_p__ = np.array(list_p__)
    array_hgh = np.array(list_hgh)
    array_th_ = np.array(list_th_)
    array_th0 = np.array(list_th0)
    array_qv_ = np.array(list_qv_)
    array_U__ = np.array(list_U__)
    array_V__ = np.array(list_V__)
    array_tim = np.array(list_tim)
    array_lat = np.array(list_lat)
    array_lon = np.array(list_lon)
    # calculate derivative variables
    # full potential temperature = perturbation theta (T) + base-state theta (T00)
    wrf_temp_K = calculate_temperature_from_potential_temperature(array_th_ + array_th0, array_p__)
    wrf_temp_C = kelvin_to_celsius(wrf_temp_K)
    wrf_e = MixR2VaporPress(array_qv_, array_p__*100)
    wrf_td_C = DewPoint(wrf_e)
    # cap dew point at the ambient temperature (supersaturation guard)
    wrf_td_C[wrf_td_C > wrf_temp_C] = wrf_temp_C[wrf_td_C > wrf_temp_C]
    wrf_RH = calculate_RH_from_QV_T_P(array_qv_, wrf_temp_K, array_p__*100)
    wrf_WD, wrf_WS = cart_to_polar(array_V__, array_U__)
    # convert mathematical wind direction to meteorological convention
    wrf_WD_met = wrf_WD + 180
    wrf_WD_met[wrf_WD_met >= 360] = wrf_WD_met[wrf_WD_met >= 360] - 360
    wrf_WS_knots = ws_ms_to_knots(wrf_WS)
    # create virtual sonde dict
    wrf_sonde_dict = {}
    wrf_sonde_dict['hght'] = array_hgh
    wrf_sonde_dict['pres'] = array_p__
    wrf_sonde_dict['temp'] = wrf_temp_C
    wrf_sonde_dict['dwpt'] = wrf_td_C
    wrf_sonde_dict['sknt'] = wrf_WS_knots
    wrf_sonde_dict['drct'] = wrf_WD_met
    wrf_sonde_dict['relh'] = wrf_RH
    wrf_sonde_dict['time'] = array_tim
    wrf_sonde_dict['lati'] = array_lat
    wrf_sonde_dict['long'] = array_lon
    return wrf_sonde_dict
def wrf_get_temp_K(wrf_nc):
    """Return the 3D air-temperature field [K] of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 3D numpy array of temperature in kelvin
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    # total pressure = base-state (PB) + perturbation (P), converted Pa -> hPa
    wrf_press = (wrf_domain_file.variables['PB'][0, :, :, :].data +
                 wrf_domain_file.variables['P'][0, :, :, :].data) / 100  # hPa
    # full potential temperature = perturbation theta (T) + base-state theta (T00)
    wrf_theta = (wrf_domain_file.variables['T'][0, :, :, :].data +
                 wrf_domain_file.variables['T00'][0].data)  # K
    wrf_temp_K = calculate_temperature_from_potential_temperature(wrf_theta, wrf_press)
    if opened_here:
        wrf_domain_file.close()
    return wrf_temp_K
def wrf_get_press_hPa(wrf_nc):
    """Return the 3D total-pressure field [hPa] of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 3D numpy array of pressure in hPa
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    # total pressure = base-state (PB) + perturbation (P), converted Pa -> hPa
    wrf_press = (wrf_domain_file.variables['PB'][0, :, :, :].data +
                 wrf_domain_file.variables['P'][0, :, :, :].data) / 100  # hPa
    if opened_here:
        wrf_domain_file.close()
    return wrf_press
def wrf_get_height_m(wrf_nc):
    """Return the 3D geopotential-height field [m] of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 3D numpy array of height in metres
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    # height = (perturbation PH + base-state PHB geopotential) / g; PH/PHB are
    # vertically staggered, so the top interface is dropped with :-1
    wrf_height = (wrf_domain_file.variables['PH'][0, :-1, :, :].data +
                  wrf_domain_file.variables['PHB'][0, :-1, :, :].data) / gravity_
    if opened_here:
        wrf_domain_file.close()
    return wrf_height
def wrf_get_terrain_height_m(wrf_nc):
    """Return the 2D terrain-height field [m] of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 2D numpy array of surface height in metres
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    # lowest staggered level (index 0) of the geopotential gives the terrain height
    wrf_height = (wrf_domain_file.variables['PH'][0, 0, :, :].data +
                  wrf_domain_file.variables['PHB'][0, 0, :, :].data) / gravity_
    if opened_here:
        wrf_domain_file.close()
    return wrf_height
def wrf_get_water_vapor_mixing_ratio(wrf_nc):
    """Return the 3D water-vapor mixing-ratio field (QVAPOR) of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 3D numpy array of the QVAPOR variable
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    wrf_QVAPOR = wrf_domain_file.variables['QVAPOR'][0, :, :, :].data
    if opened_here:
        wrf_domain_file.close()
    return wrf_QVAPOR
def wrf_get_cloud_water_mixing_ratio(wrf_nc):
    """Return the 3D cloud-water mixing-ratio field (QCLOUD) of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 3D numpy array of the QCLOUD variable
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    wrf_QCLOUD = wrf_domain_file.variables['QCLOUD'][0, :, :, :].data
    if opened_here:
        wrf_domain_file.close()
    return wrf_QCLOUD
def wrf_get_ice_mixing_ratio(wrf_nc):
    """Return the 3D ice mixing-ratio field (QICE) of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: 3D numpy array of the QICE variable
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    wrf_QICE = wrf_domain_file.variables['QICE'][0, :, :, :].data
    if opened_here:
        wrf_domain_file.close()
    return wrf_QICE
def wrf_get_lat_lon(wrf_nc):
    """Return the 2D latitude and longitude grids of a WRF output file.

    :param wrf_nc: an open netCDF Dataset, or a fullpath filename (opened and
                   closed inside this function)
    :return: (lat, lon) 2D numpy arrays in degrees; masked cells filled with NaN
    """
    opened_here = type(wrf_nc) == str
    wrf_domain_file = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
    wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
    if opened_here:
        wrf_domain_file.close()
    return wrf_lat, wrf_lon
def wrf_rename_files_fix_time_format(filename_original_list, original_character=':', replacement_character='_'):
    """Rename files on disk, replacing a character in each filename (by default
    the ':' of WRF time stamps, which is problematic on some filesystems).

    :param filename_original_list: list of fullpath filenames to rename
    :param original_character: character to be replaced
    :param replacement_character: character to use instead
    :return: None (files are renamed in place)
    """
    total_files = len(filename_original_list)
    for i_, filename_ in enumerate(filename_original_list):
        p_progress_bar(i_, total_files)
        os.rename(filename_, filename_.replace(original_character, replacement_character))
# meteorology
def calculate_saturation_vapor_pressure_wexler(T_array_K):
    """Saturation vapor pressure over water following Wexler's formulation.

    :param T_array_K: temperature [K], scalar or numpy array
    :return: saturation vapor pressure in mb (hPa)
    """
    # Wexler fit coefficients
    G0 = -0.29912729E+4
    G1 = -0.60170128E+4
    G2 = 0.1887643854E+2
    G3 = -0.28354721E-1
    G4 = 0.17838301E-4
    G5 = -0.84150417E-9
    G6 = 0.44412543E-12
    G7 = 0.2858487E+1
    # accumulate the exponent term by term, in the same order as the published fit
    exponent_ = G0 * (T_array_K ** -2)
    exponent_ = exponent_ + (G1 * (T_array_K ** -1))
    exponent_ = exponent_ + G2
    exponent_ = exponent_ + (G3 * T_array_K)
    exponent_ = exponent_ + (G4 * (T_array_K ** 2))
    exponent_ = exponent_ + (G5 * (T_array_K ** 3))
    exponent_ = exponent_ + (G6 * (T_array_K ** 4))
    exponent_ = exponent_ + (G7 * np.log(T_array_K))
    # exp() yields Pa; the 0.01 factor converts to mb (hPa)
    return np.exp(exponent_) * 0.01
def calculate_saturation_mixing_ratio(P_array_mb, T_array_K):
    """Saturation mixing ratio from pressure and temperature.

    :param P_array_mb: pressure [mb (hPa)], scalar or numpy array
    :param T_array_K: temperature [K], scalar or numpy array
    :return: saturation mixing ratio (621.97 scale factor, i.e. g/kg units)
    """
    e_s = calculate_saturation_vapor_pressure_wexler(T_array_K)
    # ratio of vapour pressure to dry-air partial pressure, scaled by 621.97
    return 621.97 * (e_s / (P_array_mb - e_s))
def calculate_potential_temperature(T_array_K, P_array_hPa):
    """Potential temperature [K], referenced to 1000 hPa.

    :param T_array_K: temperature [K]
    :param P_array_hPa: pressure [hPa]
    :return: potential temperature [K]
    """
    return T_array_K * ((1000 / P_array_hPa) ** poisson_)
def calculate_equivalent_potential_temperature(T_array_K, P_array_hPa, R_array_kg_over_kg):
    """Equivalent potential temperature [K] (approximate form).

    :param T_array_K: temperature [K]
    :param P_array_hPa: pressure [hPa]
    :param R_array_kg_over_kg: water vapour mixing ratio [kg/kg]
    :return: equivalent potential temperature [K]
    """
    reference_pressure = 1000
    # add the latent-heat contribution of the vapour to the temperature first
    T_e = T_array_K + (latent_heat_v * R_array_kg_over_kg / heat_capacity__Cp)
    return T_e * ((reference_pressure/P_array_hPa)**poisson_)
def calculate_temperature_from_potential_temperature(theta_array_K, P_array_hPa):
    """Invert the potential-temperature relation to recover temperature.

    :param theta_array_K: potential temperature [K]
    :param P_array_hPa: pressure [hPa]
    :return: temperature [K]
    """
    return theta_array_K * ( (P_array_hPa/1000) ** poisson_ )
def calculate_mountain_height_from_sonde(sonde_dict):
    """
    calculates H_hat from given values of u_array, v_array, T_array, effective_height, rh_array, q_array, p_array

    H_hat^2 is the squared non-dimensional mountain height, computed level by
    level from a radiosonde profile using a fixed mountain height of 1000 m.
    A dry Brunt-Vaisala term is used where RH < 90 % and a moist term where
    RH >= 90 % (the two are mutually exclusive and summed).

    :param sonde_dict: dict of sonde arrays keyed 'SKNT' [knots], 'DRCT' [deg],
                       'TEMP' [C], 'RELH' [%], 'PRES' [hPa], 'HGHT' [m],
                       'MIXR' [g/kg], 'THTA' [K]
    :return: H_hat_2 numpy array (one value per profile level)

    NOTE(review): g * d(ln theta)/dz already has units of N^2; squaring the
    (N_dry + N_moist) sum again below yields N^4 — compare with the _era5 and
    _WRF variants of this function, where no extra squaring is done. Confirm
    which behavior is intended.
    """
    # Set initial conditions
    height = 1000 # metres
    # define arrays
    WS_array = ws_knots_to_ms(sonde_dict['SKNT'])
    U_array, V_array = polar_to_cart(sonde_dict['DRCT'], WS_array)
    T_array = celsius_to_kelvin(sonde_dict['TEMP'])
    RH_array = sonde_dict['RELH']
    P_array = sonde_dict['PRES']
    Z_array = sonde_dict['HGHT']
    Q_array = sonde_dict['MIXR']/1000
    TH_array = sonde_dict['THTA']
    # calculated arrays
    q_s = calculate_saturation_mixing_ratio(P_array, T_array)
    # epsilon: ratio of dry-air to water-vapour gas constants
    e_ = gas_const_dry / gas_const_water
    # gradients
    d_ln_TH = np.gradient(np.log(TH_array))
    d_z = np.gradient(Z_array)
    d_q_s = np.gradient(q_s)
    # Dry Brunt - Vaisala
    N_dry = gravity_ * d_ln_TH / d_z
    N_dry[RH_array >= 90] = 0
    # Moist Brunt - Vaisala
    term_1_1 = 1 + ( latent_heat_v * q_s / (gas_const_dry * T_array) )
    term_1_2 = 1 + ( e_ * (latent_heat_v**2) * q_s / (heat_capacity__Cp * gas_const_dry * (T_array**2) ) )
    term_2_1 = d_ln_TH / d_z
    term_2_2 = latent_heat_v / (heat_capacity__Cp * T_array)
    term_2_3 = d_q_s / d_z
    term_3 = d_q_s / d_z # should be d_q_w but sonde data has no cloud water data
    N_moist = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
    N_moist[RH_array < 90] = 0
    # define output array
    N_2 = (N_dry + N_moist)**2
    H_hat_2 = N_2 * (height**2) / (U_array**2)
    return H_hat_2
def calculate_mountain_height_from_era5(era5_pressures_filename, era5_surface_filename, point_lat, point_lon,
                                        return_arrays=False, u_wind_mode='u', range_line_degrees=None,
                                        time_start_str_YYYYmmDDHHMM='',time_stop_str_YYYYmmDDHHMM='',
                                        reference_height=1000, return_debug_arrays=False):
    """
    calculates H_hat from given values of u_array, v_array, T_array, effective_height, rh_array, q_array, p_array
    u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line most not be none
    if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
    range_line_degrees: degress (decimals) from north, clockwise, of the mountain range line.

    :param era5_pressures_filename: fullpath of an ERA5 pressure-levels netCDF file
    :param era5_surface_filename: fullpath of an ERA5 surface netCDF file (same times and grid)
    :param point_lat: latitude of the point of interest [degrees]
    :param point_lon: longitude of the point of interest [degrees]
    :param return_arrays: if True, return (time, H_hat_2) arrays instead of a {time: value} dict
    :param time_start_str_YYYYmmDDHHMM: optional start time used to trim the series
    :param time_stop_str_YYYYmmDDHHMM: optional stop time used to trim the series
    :param reference_height: assumed mountain height h [m] in H_hat^2 = N^2 h^2 / U^2
    :param return_debug_arrays: if True (and return_arrays), also return N^2 and U^2
    :return: dict {epoch_seconds: H_hat_2}, or arrays per return_arrays/return_debug_arrays
    """
    # load files
    era5_sur = nc.Dataset(era5_surface_filename, 'r')
    era5_pre = nc.Dataset(era5_pressures_filename, 'r')
    # check if times are the same for both files
    dif_sum = np.sum(np.abs(era5_pre.variables['time'][:] - era5_sur.variables['time'][:]))
    if dif_sum > 0:
        print('Error, times in selected files are not the same')
        return
    # check if lat lon are the same for both files
    dif_sum = np.sum(np.abs(era5_pre.variables['latitude'][:] - era5_sur.variables['latitude'][:]))
    dif_sum = dif_sum + np.sum(np.abs(era5_pre.variables['longitude'][:] - era5_sur.variables['longitude'][:]))
    if dif_sum > 0:
        print('Error, latitude or longitude in selected files are not the same')
        return
    # find lat lon index
    lat_index, lon_index = find_index_from_lat_lon(era5_sur.variables['latitude'][:],
                                                   era5_sur.variables['longitude'][:], [point_lat], [point_lon])
    lat_index = lat_index[0]
    lon_index = lon_index[0]
    # copy arrays
    time_array = time_era5_to_seconds(np.array(era5_sur.variables['time'][:]))
    r_1 = 0
    r_2 = -1
    if time_start_str_YYYYmmDDHHMM != '':
        r_1 = time_to_row_str(time_array, time_start_str_YYYYmmDDHHMM)
    if time_stop_str_YYYYmmDDHHMM != '':
        r_2 = time_to_row_str(time_array, time_stop_str_YYYYmmDDHHMM)
    # NOTE(review): with the default r_2 = -1 the final time stamp is excluded
    # by the slices below — confirm this is intended
    time_array = time_array[r_1:r_2]
    sp_array = np.array(era5_sur.variables['sp'][r_1:r_2, lat_index, lon_index]) / 100 # hPa
    P_array = np.array(era5_pre.variables['level'][:]) # hPa
    if range_line_degrees is not None:
        # project the wind onto the normal of the mountain-range line
        WD_, WS_ = cart_to_polar(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]).flatten(),
                                 np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]).flatten())
        WD_delta = WD_ - range_line_degrees
        range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
        U_array = range_normal_component.reshape((sp_array.shape[0], P_array.shape[0]))
    else:
        if u_wind_mode == 'u':
            U_array = np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index])
        else:
            # wind speed magnitude
            U_array = np.sqrt(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]) ** 2 +
                              np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]) ** 2)
    T_array = np.array(era5_pre.variables['t'][r_1:r_2, :, lat_index, lon_index])
    Q_L_array = np.array(era5_pre.variables['crwc'][r_1:r_2, :, lat_index, lon_index])
    RH_array = np.array(era5_pre.variables['r'][r_1:r_2, :, lat_index, lon_index])
    Z_array = np.array(era5_pre.variables['z'][r_1:r_2, :, lat_index, lon_index]) / gravity_
    # calculate arrays
    TH_array = np.zeros((time_array.shape[0], P_array.shape[0]), dtype=float)
    for t_ in range(time_array.shape[0]):
        TH_array[t_,:] = calculate_potential_temperature(T_array[t_,:], P_array[:])
    # calculated arrays
    q_s = calculate_saturation_mixing_ratio(P_array, T_array)
    # epsilon: ratio of dry-air to water-vapour gas constants
    e_ = gas_const_dry / gas_const_water
    # create output dict
    H_hat_2 = {}
    # loop tru time stamps
    for t_ in range(time_array.shape[0]):
        p_progress_bar(t_,time_array.shape[0])
        # find surface pressure at this time stamp
        surface_p = sp_array[t_]
        # find pressure at 1000 meters
        pressure_1000m = np.interp(reference_height, Z_array[t_, :], P_array)
        pressure_1000m_index = np.argmin(np.abs(P_array - pressure_1000m))
        # find extrapolations
        # surface values are interpolated in log-pressure; *_1000 values in height
        ql_0 = np.interp(np.log(surface_p), np.log(P_array), Q_L_array[t_, :])
        z__0 = np.interp(np.log(surface_p), np.log(P_array), Z_array[t_, :])
        th_0 = np.interp(np.log(surface_p), np.log(P_array), TH_array[t_, :])
        qs_0 = np.interp(np.log(surface_p), np.log(P_array), q_s[t_, :])
        t__1000 = np.interp(reference_height, Z_array[t_, :], T_array[t_, :])
        u__1000 = np.interp(reference_height, Z_array[t_, :], U_array[t_, :])
        ql_1000 = np.interp(reference_height, Z_array[t_, :], Q_L_array[t_, :])
        z__1000 = reference_height
        th_1000 = np.interp(reference_height, Z_array[t_, :], TH_array[t_, :])
        qs_1000 = np.interp(reference_height, Z_array[t_, :], q_s[t_, :])
        # gradients
        # bulk (two-level) differences between the surface and the reference height
        d_ln_TH = np.log(th_1000) - np.log(th_0)
        d_z = z__1000 - z__0
        d_q_s = qs_1000 - qs_0
        d_q_w = (d_q_s) + (ql_1000 - ql_0)
        # Brunt - Vaisala
        # use the moist form if any level in the layer is near saturation
        if np.max(RH_array[t_, pressure_1000m_index:])>= 90:
            # Moist
            term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
            term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
                             (heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
            term_2_1 = d_ln_TH / d_z
            term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
            term_2_3 = d_q_s / d_z
            term_3 = d_q_w / d_z
            N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
        else:
            # Dry
            N_2 = gravity_ * d_ln_TH / d_z
        # populate each time stamp
        H_hat_2[time_array[t_]] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
    era5_sur.close()
    era5_pre.close()
    if return_arrays:
        H_hat_2_time = sorted(H_hat_2.keys())
        H_hat_2_time = np.array(H_hat_2_time)
        H_hat_2_vals = np.zeros(H_hat_2_time.shape[0], dtype=float)
        for r_ in range(H_hat_2_time.shape[0]):
            H_hat_2_vals[r_] = H_hat_2[H_hat_2_time[r_]]
        if return_debug_arrays:
            # NOTE(review): N_2 and u__1000 here hold values from the LAST loop
            # iteration only, not full time series
            return H_hat_2_time, H_hat_2_vals, N_2, u__1000 ** 2
        else:
            return H_hat_2_time, H_hat_2_vals
    else:
        return H_hat_2
def calculate_mountain_height_from_WRF(filename_SP, filename_PR,
                                       filename_UU, filename_VV,
                                       filename_TH, filename_QR,
                                       filename_QV, filename_PH,
                                       return_arrays=False, u_wind_mode='u', range_line_degrees=None,
                                       reference_height=1000):
    """
    calculates H_hat from WRF point output text files
    u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line most not be none
    if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
    range_line_degrees: degress (decimals) from north, clockwise, of the mountain range line.
    :param filename_SP: fullpath filename of surface pressure file
    :param filename_PR: fullpath filename of pressure file
    :param filename_UU: fullpath filename of u wind file
    :param filename_VV: fullpath filename of v wind file
    :param filename_TH: fullpath filename of potential temperature file
    :param filename_QR: fullpath filename of rain water mixing ratio file
    :param filename_QV: fullpath filename of Water vapor mixing ratio file
    :param filename_PH: fullpath filename of geopotential height file
    :param return_arrays: if true, will return also brunt vaisalla and wind component squared
        (NOTE(review): those extras hold only the LAST time stamp's values)
    :param u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line most not be none
    :param range_line_degrees: if not None, u_wind_mode will automatically be set to normal_to_range
    :param reference_height: mean height of mountain range
    :return:
    H_hat_2
    """
    # load arrays from text
    # column 0 of each file is assumed to be a time/index column, hence [:,1:]
    SP_array = genfromtxt(filename_SP, dtype=float, skip_header=1)[:,9] / 100 # hPa
    PR_array = genfromtxt(filename_PR, dtype=float, skip_header=1)[:,1:] / 100 # hPa
    UU_array = genfromtxt(filename_UU, dtype=float, skip_header=1)[:,1:]
    VV_array = genfromtxt(filename_VV, dtype=float, skip_header=1)[:,1:]
    TH_array = genfromtxt(filename_TH, dtype=float, skip_header=1)[:,1:]
    QR_array = genfromtxt(filename_QR, dtype=float, skip_header=1)[:,1:]
    QV_array = genfromtxt(filename_QV, dtype=float, skip_header=1)[:,1:]
    Z_array  = genfromtxt(filename_PH, dtype=float, skip_header=1)[:,1:] # already in meters
    # calculate arrays
    if range_line_degrees is not None:
        # project the wind onto the normal of the mountain-range line
        WD_, WS_ = cart_to_polar(UU_array.flatten(), VV_array.flatten())
        WD_delta = WD_ - range_line_degrees
        range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
        U_array = range_normal_component.reshape((UU_array.shape[0], UU_array.shape[1]))
    else:
        if u_wind_mode == 'u':
            U_array = UU_array
        else:
            # wind speed magnitude
            U_array = np.sqrt(UU_array ** 2 + VV_array ** 2)
    T_array = calculate_temperature_from_potential_temperature(TH_array, PR_array)
    RH_array = calculate_RH_from_QV_T_P(QV_array, T_array, PR_array*100)
    q_s = calculate_saturation_mixing_ratio(PR_array, T_array)
    # epsilon: ratio of dry-air to water-vapour gas constants
    e_ = gas_const_dry / gas_const_water
    # create output array
    H_hat_2 = np.zeros(PR_array.shape[0], dtype=float)
    # loop tru time stamps
    for r_ in range(PR_array.shape[0]):
        p_progress_bar(r_, PR_array.shape[0])
        # find surface pressure at this time stamp
        surface_p = SP_array[r_]
        # find pressure at 1000 meters
        pressure_1000m = np.interp(reference_height, Z_array[r_, :], PR_array[r_, :])
        pressure_1000m_index = np.argmin(np.abs(PR_array[r_, :] - pressure_1000m))
        # find extrapolations
        # surface values interpolated in log-pressure; *_1000 values in height
        ql_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), QR_array[r_, :])
        z__0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), Z_array[r_, :])
        th_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), TH_array[r_, :])
        qs_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), q_s[r_, :])
        t__1000 = np.interp(reference_height, Z_array[r_, :], T_array[r_, :])
        u__1000 = np.interp(reference_height, Z_array[r_, :], U_array[r_, :])
        ql_1000 = np.interp(reference_height, Z_array[r_, :], QR_array[r_, :])
        z__1000 = reference_height
        th_1000 = np.interp(reference_height, Z_array[r_, :], TH_array[r_, :])
        qs_1000 = np.interp(reference_height, Z_array[r_, :], q_s[r_, :])
        # gradients
        # bulk (two-level) differences between the surface and the reference height
        d_ln_TH = np.log(th_1000) - np.log(th_0)
        d_z = z__1000 - z__0
        d_q_s = qs_1000 - qs_0
        d_q_w = (d_q_s) + (ql_1000 - ql_0)
        # Brunt - Vaisala
        # use the moist form if any level in the layer is near saturation
        if np.max(RH_array[r_, pressure_1000m_index:])>= 90:
            # Moist
            term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
            term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
                             (heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
            term_2_1 = d_ln_TH / d_z
            term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
            term_2_3 = d_q_s / d_z
            term_3 = d_q_w / d_z
            N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
        else:
            # Dry
            N_2 = gravity_ * d_ln_TH / d_z
        # populate each time stamp
        H_hat_2[r_] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
    if return_arrays:
        return H_hat_2, N_2, u__1000 ** 2
    else:
        return H_hat_2
def calculate_dewpoint_from_T_RH(T_, RH_):
    """
    from Magnus formula, using Bolton's constants
    :param T_: ambient temperature [Celsius]
    :param RH_: relative humidity
    :return: Td_ dew point temperature [celsius]
    """
    # note: the Magnus 'a' coefficient (6.112 hPa) cancels out of this
    # inversion, so only b and c are needed (removed the unused local)
    b = 17.67
    c = 243.5
    y_ = np.log(RH_/100) + ((b*T_)/(c+T_))
    Td_ = (c * y_) / (b - y_)
    return Td_
def calculate_RH_from_QV_T_P(arr_qvapor, arr_temp_K, arr_press_Pa):
    """Relative humidity [%] from mixing ratio, temperature and pressure.

    :param arr_qvapor: water vapour mixing ratio [kg/kg]
    :param arr_temp_K: temperature [K]
    :param arr_press_Pa: pressure [Pa]
    :return: numpy array of relative humidity [%]
    """
    # saturation vapour pressure (Clausius-Clapeyron style fit), hPa
    saturation_vp = 6.11 * e_constant**((2500000/461) * ((1/273) - (1/arr_temp_K)))
    # actual vapour partial pressure, hPa (0.622 = Rd/Rv)
    vapour_p = arr_qvapor * (arr_press_Pa/100) / (arr_qvapor + 0.622)
    return np.array(100 * vapour_p / saturation_vp)
def calculate_profile_input_for_cluster_analysis_from_ERA5(p_profile, t_profile, td_profile, q_profile,
                                                           u_profile, v_profile, h_profile, surface_p):
    """
    takes data from ERA5 for only one time stamp for all pressure levels from 250 to 1000 hPa

    Builds a profile truncated at the surface pressure (with surface values
    extrapolated in log-pressure) and derives bulk indices for clustering.

    :param p_profile: in hPa
    :param t_profile: in Celsius
    :param td_profile: in Celsius
    :param q_profile: in kg/kg
    :param u_profile: in m/s
    :param v_profile: in m/s
    :param h_profile: in m
    :param surface_p: in hPa
    :return: surface_p, qv_, qu_, tw_, sh_, tt_
             qu_/qv_: vertically integrated moisture flux components (sum q*wind*dp/g)
             tw_: column water (sum q*dp/g)
             sh_: bulk wind shear magnitude between the ~850 and ~500 hPa levels
             tt_: Total-Totals-style stability index (T850 + Td850 - 2*T500)
    """
    # trim profiles from surface to top
    # find which levels should be included
    # assumes p_profile is ordered top-down (increasing pressure), per docstring
    levels_total = 0
    for i_ in range(p_profile.shape[0]):
        if p_profile[i_] > surface_p:
            break
        levels_total += 1
    ####################################### find extrapolations
    # interpolate each quantity to the surface pressure in log-pressure space
    surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
    surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
    surface_q = np.interp(np.log(surface_p), np.log(p_profile), q_profile)
    surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
    surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
    surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
    # create temp arrays
    # the above-surface levels plus one extra slot for the surface values
    T_array = np.zeros(levels_total + 1, dtype=float)
    Td_array = np.zeros(levels_total + 1, dtype=float)
    Q_array = np.zeros(levels_total + 1, dtype=float)
    U_array = np.zeros(levels_total + 1, dtype=float)
    V_array = np.zeros(levels_total + 1, dtype=float)
    H_array = np.zeros(levels_total + 1, dtype=float)
    P_array = np.zeros(levels_total + 1, dtype=float)
    T_array[:levels_total] = t_profile[:levels_total]
    Td_array[:levels_total] = td_profile[:levels_total]
    Q_array[:levels_total] = q_profile[:levels_total]
    U_array[:levels_total] = u_profile[:levels_total]
    V_array[:levels_total] = v_profile[:levels_total]
    H_array[:levels_total] = h_profile[:levels_total]
    P_array[:levels_total] = p_profile[:levels_total]
    T_array[-1] = surface_t
    Td_array[-1] = surface_td
    Q_array[-1] = surface_q
    U_array[-1] = surface_u
    V_array[-1] = surface_v
    H_array[-1] = surface_h
    P_array[-1] = surface_p
    ######################################
    # indices of the levels closest to 850 and 500 hPa
    r_850 = np.argmin(np.abs(P_array - 850))
    r_500 = np.argmin(np.abs(P_array - 500))
    # layer thicknesses in pressure for the vertical integrals
    dp_ = np.abs(np.gradient(P_array))
    tt_ = (T_array[r_850] - (2 * T_array[r_500]) + Td_array[r_850])
    qu_ = np.sum(Q_array * U_array * dp_) / gravity_
    qv_ = np.sum(Q_array * V_array * dp_) / gravity_
    tw_ = np.sum(Q_array * dp_) / gravity_
    del_u = U_array[r_850] - U_array[r_500]
    del_v = V_array[r_850] - V_array[r_500]
    del_z = H_array[r_850] - H_array[r_500]
    sh_ = ((del_u / del_z) ** 2 + (del_v / del_z) ** 2) ** 0.5
    return surface_p, qv_, qu_, tw_, sh_, tt_
def barometric_equation(presb_pa, tempb_k, deltah_m, Gamma=-0.0065):
    """The barometric equation models the change in pressure with
    height in the atmosphere.
    INPUTS:
    presb_pa (pa): The base pressure
    tempb_k (K): The base temperature
    deltah_m (m): The height differential between the base height and the
    desired height
    Gamma [=-0.0065]: The atmospheric lapse rate
    OUTPUTS
    pres (pa): Pressure at the requested level
    REFERENCE:
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    temperature_ratio = tempb_k/(tempb_k+Gamma*deltah_m)
    exponent_ = grav*m_a/(Rstar_a*Gamma)
    return presb_pa * temperature_ratio**exponent_
def barometric_equation_inv(heightb_m, tempb_k, presb_pa,
                            prest_pa, Gamma=-0.0065):
    """Inverse barometric equation: altitude at a target pressure, given a
    base altitude, base temperature and base pressure.
    INPUTS:
    heightb_m (m): The base altitude
    tempb_k (K): The base temperature
    presb_pa (pa): The base pressure
    prest_pa (pa): The pressure at the desired height
    Gamma [=-0.0065]: The atmospheric lapse rate
    OUTPUTS
    heightt_m
    REFERENCE:
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    pressure_term = (presb_pa/prest_pa)**(Rstar_a*Gamma/(grav*m_a))-1
    return heightb_m + tempb_k * pressure_term / Gamma
def Theta(tempk, pres, pref=100000.):
    """Potential Temperature
    INPUTS:
    tempk (K)
    pres (Pa)
    pref: Reference pressure (default 100000 Pa)
    OUTPUTS: Theta (K)
    Source: Wikipedia
    Prints a warning when a pressure below 2000 Pa is given, since that
    usually means the caller passed hPa instead of Pa.
    """
    try:
        lowest_pres = min(pres)
    except TypeError:
        # scalar input: min() is not applicable
        lowest_pres = pres
    if lowest_pres < 2000:
        print("WARNING: P<2000 Pa; did you input a value in hPa?")
    return tempk * (pref/pres)**(Rs_da/Cp_da)
def TempK(theta, pres, pref=100000.):
    """Inverse of Theta(): temperature (K) from potential temperature and
    pressure. Prints the same hPa/Pa sanity warning as Theta()."""
    try:
        lowest_pres = min(pres)
    except TypeError:
        # scalar input: min() is not applicable
        lowest_pres = pres
    if lowest_pres < 2000:
        print("WARNING: P<2000 Pa; did you input a value in hPa?")
    return theta * (pres/pref)**(Rs_da/Cp_da)
def ThetaE(tempk, pres, e):
    """Calculate Equivalent Potential Temperature
    for lowest model level (or surface)
    INPUTS:
    tempk: Temperature [K]
    pres: Pressure [Pa]
    e: Water vapour partial pressure [Pa]
    OUTPUTS:
    theta_e: equivalent potential temperature
    References:
    Eq. (9.40) from Holton (2004)
    Eq. (22) from Bolton (1980)
    <NAME> and <NAME> (2013), 'Land-Ocean Warming
    Contrast over a Wide Range of Climates: Convective Quasi-Equilibrium
    Theory and Idealized Simulations', J. Climate """
    temp_c = tempk - degCtoK
    # Theta() is called only for its hPa/Pa sanity warning; its value is unused
    Theta(tempk, pres)
    # relative humidity, needed for the lifting-condensation-level temperature
    e_sat = VaporPressure(temp_c)
    rh_pct = 100. * e / e_sat
    # water vapour mixing ratio
    mix_ratio = MixRatio(e, pres)
    # temperature at the Lifting Condensation Level
    temp_lcl = ((tempk-55)*2840 / (2840-(np.log(rh_pct/100)*(tempk-55)))) + 55
    # dry-liquid potential temperature
    theta_dl = tempk * \
        (100000./(pres-e))**(Rs_da/Cp_da)*(tempk/temp_lcl)**(0.28*mix_ratio)
    # equivalent potential temperature
    return theta_dl * np.exp((Lv * mix_ratio) / (Cp_da * temp_lcl))
def ThetaE_Bolton(tempk, pres, e, pref=100000.):
    """Theta_E following Bolton (1980)
    INPUTS:
    tempk: Temperature [K]
    pres: Pressure [Pa]
    e: Water vapour partial pressure [Pa]
    See http://en.wikipedia.org/wiki/Equivalent_potential_temperature
    """
    mix_ratio = MixRatio(e, pres)
    dewpoint_k = DewPoint(e) + degCtoK
    kappa_d = Rs_da / Cp_da
    # temperature at the LCL (Bolton's empirical formula)
    temp_lcl = 56 + ((dewpoint_k-56.)**-1+(np.log(tempk/dewpoint_k)/800.))**(-1)
    # dry potential temperature referenced to the dry-air partial pressure
    theta_dl = tempk * (pref/(pres-e))**kappa_d*(tempk/temp_lcl)**(0.28*mix_ratio)
    # combine into the equivalent potential temperature
    return theta_dl * np.exp((3036./temp_lcl-0.78)*mix_ratio*(1+0.448*mix_ratio))
def ThetaV(tempk, pres, e):
    """Virtual Potential Temperature
    INPUTS
    tempk (K)
    pres (Pa)
    e: Water vapour partial pressure (Pa)
    OUTPUTS
    theta_v : Virtual potential temperature (K)
    """
    mix_ratio = MixRatio(e, pres)
    dry_theta = Theta(tempk, pres)
    # virtual correction for the vapour content
    return dry_theta * (1+mix_ratio/Epsilon) / (1+mix_ratio)
def GammaW(tempk, pres):
    """Moist adiabatic lapse rate (deg C/Pa) for the given environmental
    temperature and pressure.
    INPUTS:
    tempk (K)
    pres (Pa)
    RETURNS:
    GammaW: The moist adiabatic lapse rate (Deg C/Pa)
    REFERENCE:
    http://glossary.ametsoc.org/wiki/Moist-adiabatic_lapse_rate
    (multiplied by 1/(grav*rho) to give the rate per Pa rather than per metre)
    """
    temp_c = tempk-degCtoK
    e_sat = VaporPressure(temp_c)
    w_sat = MixRatio(e_sat, pres)
    # tempv=VirtualTempFromMixR(tempk,ws)
    temp_virtual = VirtualTemp(tempk, pres, e_sat)
    latent = Latentc(temp_c)
    rho_air = pres / (Rs_da*temp_virtual)
    # numerator/denominator form of the standard moist-adiabatic expression
    numerator_ = -1. * (1.0+latent*w_sat/(Rs_da*tempk))
    denominator_ = rho_air * (Cp_da+Epsilon*latent*latent*w_sat/(Rs_da*tempk*tempk))
    return numerator_ / denominator_
def DensHumid(tempk, pres, e):
    """Density of moist air from temperature, pressure and vapour pressure.
    INPUTS:
    tempk: Temperature (K)
    pres: static (total) pressure (Pa)
    e: water vapour partial pressure (Pa)
    OUTPUTS:
    rho_air (kg/m^3)
    SOURCE: http://en.wikipedia.org/wiki/Density_of_air
    """
    # partial pressure and density of the dry-air fraction
    pres_dry = pres - e
    rho_dry = pres_dry / (Rs_da * tempk)
    # density of the water-vapour fraction
    rho_vapour = e/(Rs_v * tempk)
    return rho_dry + rho_vapour
def Density(tempk, pres, mixr):
    """Density of moist air via the virtual temperature.
    INPUTS:
    tempk: Temperature (K)
    pres: static pressure (Pa)
    mixr: mixing ratio (kg/kg)
    OUTPUTS:
    rho_air (kg/m^3)
    """
    return pres / (Rs_da * VirtualTempFromMixR(tempk, mixr))
def VirtualTemp(tempk, pres, e):
    """Virtual Temperature
    INPUTS:
    tempk: Temperature (K)
    pres: static pressure (Pa)
    e: vapour pressure (Pa)
    OUTPUTS:
    tempv: Virtual temperature (K)
    SOURCE: Wikipedia."""
    return tempk / (1-(e/pres)*(1-Epsilon))
def VirtualTempFromMixR(tempk, mixr):
    """Virtual temperature from the mixing ratio (first-order approximation
    Tv ~ T * (1 + 0.6*w)).
    INPUTS:
    tempk: Temperature (K)
    mixr: Mixing Ratio (kg/kg)
    OUTPUTS:
    tempv: Virtual temperature (K)
    """
    return tempk * (1.0+0.6*mixr)
def Latentc(tempc):
    """Latent heat of condensation (vapourisation), cubic fit in temperature.
    INPUTS:
    tempc (C)
    OUTPUTS:
    L_w (J/kg)
    SOURCE:
    http://en.wikipedia.org/wiki/Latent_heat#Latent_heat_for_condensation_of_water
    """
    # polynomial fit in deg C gives kJ/kg; scale to J/kg
    kj_per_kg = 2500.8 - 2.36*tempc + 0.0016*tempc**2 - 0.00006*tempc**3
    return 1000 * kj_per_kg
def VaporPressure(tempc, phase="liquid"):
    """Water vapor pressure over liquid water or ice.
    INPUTS:
    tempc: (C) OR dwpt (C), if SATURATION vapour pressure is desired.
    phase: ['liquid'],'ice'. If 'liquid', always use the liquid-water fit.
        If 'ice', use the ice fit below 0 C and the liquid fit at/above 0 C.
    RETURNS: e_sat (Pa)
    SOURCE: http://cires.colorado.edu/~voemel/vp.html (#2:
    CIMO guide (WMO 2008), modified to return values in Pa)
    Simple Magnus-type fits, chosen for simplicity; they perform well
    against the reference forms above about -40 C.
    """
    over_liquid = 6.112 * np.exp(17.67*tempc/(tempc+243.12))*100.
    if phase == "liquid":
        return over_liquid
    if phase == "ice":
        over_ice = 6.112 * np.exp(22.46*tempc/(tempc+272.62))*100.
        return np.where(tempc < 0, over_ice, over_liquid)
    raise NotImplementedError
def SatVap(dwpt, phase="liquid"):
    """Deprecated alias for VaporPressure(); kept for backward compatibility."""
    print("WARNING: This function is deprecated, please use VaporPressure()"
          " instead, with dwpt as argument")
    return VaporPressure(dwpt, phase)
def MixRatio(e, p):
    """Mixing ratio of water vapour.

    INPUTS
    e (Pa) Water vapor pressure
    p (Pa) Ambient pressure
    RETURNS
    qv (kg kg^-1) Water vapor mixing ratio
    """
    dry_air_pressure = p - e
    return Epsilon * e / dry_air_pressure
def MixR2VaporPress(qv, p):
    """Vapor pressure given mixing ratio and pressure.

    INPUTS
    qv (kg kg^-1) Water vapor mixing ratio
    p (Pa) Ambient pressure
    RETURNS
    e (Pa) Water vapor pressure
    """
    return p * qv / (Epsilon + qv)
def DewPoint(e):
    """Dew point via Bolton's (1980, MWR, p1047) formulae.

    INPUTS:
    e (Pa) Water Vapor Pressure
    OUTPUTS:
    Td (C)
    """
    log_term = np.log(e / 611.2)
    denominator = 17.67 - log_term
    dewpoint_k = (denominator * degCtoK + 243.5 * log_term) / denominator
    return dewpoint_k - degCtoK
def WetBulb(tempc, RH):
    """Wet-bulb temperature from relative humidity and air temperature,
    Stull (2011).

    INPUTS:
    tempc (C)
    RH (%)
    OUTPUTS:
    tempwb (C)
    """
    term_1 = tempc * np.arctan(0.151977 * (RH + 8.313659)**0.5)
    term_2 = np.arctan(tempc + RH)
    term_3 = np.arctan(RH - 1.676331)
    term_4 = 0.00391838 * RH**1.5 * np.arctan(0.023101 * RH)
    return term_1 + term_2 - term_3 + term_4 - 4.686035
# unit conversions
def convert_unit_and_save_data_ppb_ugm3(filename_, station_name):
    """Convert pollutant columns of one station CSV from ppb to ug/m^3.

    Reads filename_ (header row + numeric rows), multiplies the hard-coded
    gas columns by their ppb -> ug/m^3 factors, rewrites the headers of
    columns 5-21 as '<station_name>_<parameter>_<unit>', and writes the
    result next to the input as '<name>_unit_converted.csv'.

    NOTE(review): path handling splits on backslashes, i.e. assumes
    Windows-style paths — confirm before using on other platforms.
    """
    # ppb -> ug/m^3 factors per column index, from:
    # https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
    # http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
    parameters_unit_scaling = {'11' : 1.96, # O3
                               '10' : 1.25, # NO
                               '9' : 1.88, # NO2
                               '16' : 2.62, # SO2
                               '8' : 1.15} # CO
    # LaTeX-formatted unit/parameter labels used in the rewritten header
    new_unit_name = '[$\mu$g/m$^3$]'
    parameter_name_mod = {'9' : 'NO$_2$',
                          '11' : 'O$_3$',
                          '12' : 'PM$_1$$_0$',
                          '13' : 'PM$_2$$_.$$_5$',
                          '7' : 'CO$_2$',
                          '16' : 'SO$_2$',
                          }
    # station_name = 'QF_01'
    data_array = open_csv_file(filename_)
    current_header = data_array[0,:]
    new_header = np.array(current_header)
    # values as floats; scaled copy keeps the original untouched
    v_current = np.array(data_array[1:,:],dtype=float)
    v_new = np.array(v_current)
    for keys_ in parameters_unit_scaling.keys():
        v_new[:, int(keys_)] = v_current[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
    # add station name suffix
    for i_ in range(5,22):
        # pretty parameter name where defined, otherwise keep text before '_'
        if str(i_) in parameter_name_mod.keys():
            parameter_name = parameter_name_mod[str(i_)]
        else:
            parameter_name = current_header[i_].split('_')[0]
        # converted columns get the new ug/m^3 label, others keep their unit
        if str(i_) in parameters_unit_scaling.keys():
            parameter_unit = new_unit_name
        else:
            parameter_unit = current_header[i_].split('_')[1]
        new_header[i_] = station_name + '_' + parameter_name + '_' + parameter_unit
    data_array[1:,:] = v_new
    data_array[0,:] = new_header
    filename_new = filename_.split('\\')[-1].split('.')[0] + '_unit_converted.csv'
    current_filename_without_path = filename_.split('\\')[-1]
    current_filename_path = filename_[:-len(current_filename_without_path)]
    numpy_save_txt(current_filename_path + filename_new, data_array)
    print('done!')
def save_data_with_unit_conversion_ppb_ugm3(file_list_path):
    """Convert 12 monthly station CSVs from ppb to ug/m^3 and save copies.

    Expects file_list_path to contain at least 12 CSV files (sorted by name,
    one per month). For each, scales the hard-coded gas columns, replaces
    the header with a fixed list, and writes '<name>_ugm3.csv' in the same
    folder.

    NOTE(review): path handling uses backslashes, i.e. assumes Windows-style
    paths — confirm before using on other platforms.
    """
    file_list = sorted(glob.glob(str(file_list_path + '\\' + '*.csv')))
    # ppb -> ug/m^3 factors per column index, from:
    # https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
    # http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
    parameters_unit_scaling = {'12' : 1.96, # O3
                               '13' : 1.25, # NO
                               '14' : 1.88, # NO2
                               '15' : 2.62, # SO2
                               '16' : 1.15} # CO
    # fixed output header; index comments give the column each name lands in
    parameters_new_names = ['YYYY', # 0
                            'MM', # 1
                            'DD', # 2
                            'HH', # 3
                            'mm', # 4
                            'Day of the week', # 5
                            'WD degrees', # 6
                            'WS m/s', # 7
                            'Temp Celsius', # 8
                            'RH %', # 9
                            'SR W/m2', # 10
                            'ATP mbar', # 11
                            'O3 ug/m3', # 12
                            'NO ug/m3', # 13
                            'NO2 ug/m3', # 14
                            'SO2 ug/m3', # 15
                            'CO mg/m3', # 16
                            'CO2 ppm', # 17
                            'PM10 ug/m3', # 18
                            'PM2.5 ug/m3', # 19
                            'THC ppm', # 20
                            'Rain mm', # 21
                            'Ox ppb', # 22
                            'NOx ppb'] # 23
    # one file per month, in sorted order
    for month_ in range(1,13):
        print(month_)
        filename_old = file_list[month_ -1]
        data_array = open_csv_file(file_list[month_ -1])
        v_ppb = np.array(data_array[1:,:],dtype=float)
        v_ug_m3 = np.array(v_ppb)
        for keys_ in parameters_unit_scaling.keys():
            v_ug_m3[:, int(keys_)] = v_ppb[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
        data_array[0, :] = parameters_new_names
        data_array[1:,:] = v_ug_m3
        filename_new = filename_old.split('\\')[-1].split('.')[0] + '_ugm3.csv'
        numpy_save_txt(file_list_path + '\\' + filename_new, data_array)
    print('done!')
def RH_to_abs_conc(arr_RH, arr_T):
    """Absolute water concentration from relative humidity and temperature.

    INPUTS:
    arr_RH: relative humidity (%)
    arr_T: temperature (K)
    RETURNS:
    number concentration (units set by module constant boltzmann_)
    """
    # polynomial fit for the saturation vapour pressure exponent
    t_scaled = 1 - (373.15 / arr_T)
    exponent_ = (13.3185 * (t_scaled**1)) + \
                (-1.97 * (t_scaled**2)) + \
                (-.6445 * (t_scaled**3)) + \
                (-.1299 * (t_scaled**4))
    saturation_pressure = 1013.25 * e_constant ** exponent_  # mbar
    return (arr_RH * saturation_pressure) / (100 * boltzmann_ * arr_T)
def Mixing_Ratio_to_molecules_per_cm3(arr_MR, ATP_mbar, Temp_C):
    """Convert mixing ratio to number concentration (molecules/cm3)
    given ambient pressure (mbar) and temperature (C)."""
    temp_kelvin = Temp_C + 273.15
    return arr_MR * (ATP_mbar / (boltzmann_ * temp_kelvin))
def molecules_per_cm3_to_Mixing_Ratio(arr_Molec_per_cm3, ATP_mbar, Temp_C):
    """Convert number concentration (molecules/cm3) back to mixing ratio
    given ambient pressure (mbar) and temperature (C)."""
    temp_kelvin = Temp_C + 273.15
    return (arr_Molec_per_cm3 * boltzmann_ * temp_kelvin) / ATP_mbar
def ws_knots_to_ms(arr_):
    """Convert wind speed from knots to m/s."""
    knot_in_ms = .514444
    return arr_ * knot_in_ms
def ws_ms_to_knots(arr_):
    """Convert wind speed from m/s to knots."""
    knot_in_ms = .514444
    return arr_ / knot_in_ms
def kelvin_to_celsius(arr_temp_k):
    """Convert temperature from Kelvin to Celsius."""
    zero_c_in_kelvin = 273.15
    return arr_temp_k - zero_c_in_kelvin
def celsius_to_kelvin(arr_temp_c):
    """Convert temperature from Celsius to Kelvin."""
    zero_c_in_kelvin = 273.15
    return arr_temp_c + zero_c_in_kelvin
# geo reference
def find_index_from_lat_lon(series_lat, series_lon, point_lat_list, point_lon_list):
    """Find the indexes of the closest entries in 1-D lat/lon series.

    INPUTS:
    series_lat, series_lon: 1-D arrays of latitudes / longitudes
    point_lat_list, point_lon_list: scalar or tuple/list of target points
    RETURNS:
    (lat_index, lon_index) — scalars for scalar input, lists for
    tuple/list input.
    """
    lat_m = series_lat
    lon_m = series_lon
    # Mask non-finite samples so they cannot win the argmin.
    # BUGFIX: the previous check (sum != sum) only detected NaNs, so the
    # intended isinf masking never triggered for inf-only arrays.
    if not (np.all(np.isfinite(lat_m)) and np.all(np.isfinite(lon_m))):
        lat_m = np.ma.masked_where(~np.isfinite(lat_m), lat_m)
        lon_m = np.ma.masked_where(~np.isfinite(lon_m), lon_m)
    if isinstance(point_lat_list, (tuple, list)):
        lat_index_list = [np.argmin(np.abs(lat_m - lat_)) for lat_ in point_lat_list]
        lon_index_list = [np.argmin(np.abs(lon_m - lon_)) for lon_ in point_lon_list]
        return lat_index_list, lon_index_list
    return np.argmin(np.abs(lat_m - point_lat_list)), np.argmin(np.abs(lon_m - point_lon_list))
def find_index_from_lat_lon_2D_arrays(lat_arr, lon_arr, point_lat, point_lon):
    """Return the (row, col) index of the 2-D grid cell closest to the point,
    using Euclidean distance in degrees."""
    delta_lat = lat_arr - point_lat
    delta_lon = lon_arr - point_lon
    distance_deg = (delta_lat**2 + delta_lon**2)**0.5
    return find_min_index_2d_array(distance_deg)
def find_index_from_lat_lon_1D_arrays(lat_arr, lon_arr, point_lat, point_lon):
    """Return the index of the 1-D lat/lon pair closest to the point,
    using Euclidean distance in degrees."""
    delta_lat = lat_arr - point_lat
    delta_lon = lon_arr - point_lon
    distance_deg = (delta_lat**2 + delta_lon**2)**0.5
    return find_min_index_1d_array(distance_deg)
def distance_array_lat_lon_2D_arrays_degrees(lat_arr, lon_arr, point_lat, point_lon):
    """Euclidean distance (in degrees) from every grid cell to the point."""
    delta_lat = lat_arr - point_lat
    delta_lon = lon_arr - point_lon
    return (delta_lat**2 + delta_lon**2)**0.5
def meter_per_degrees(lat_point):
    """Meters per degree of latitude and longitude at the given latitude,
    from a truncated-series approximation of the ellipsoid.

    RETURNS: (m_per_deg_lat, m_per_deg_lon), both positive.
    """
    lat_abs_rad = np.deg2rad(np.abs(lat_point))
    meters_lat = 111132.954 - 559.822 * np.cos(2 * lat_abs_rad) + 1.175 * np.cos(4 * lat_abs_rad)
    meters_lon = 111132.954 * np.cos(lat_abs_rad)
    return np.abs(meters_lat), np.abs(meters_lon)
def degrees_per_meter(lat_point):
    """Degrees of latitude and longitude per meter at the given latitude
    (inverse of meter_per_degrees)."""
    meters_lat, meters_lon = meter_per_degrees(lat_point)
    return 1 / meters_lat, 1 / meters_lon
def distance_array_lat_lon_2D_arrays_degress_to_meters(lat_arr, lon_arr, point_lat, point_lon):
    """Distance in meters from every grid cell to the point, using the
    meters-per-degree scaling at the grid's mean latitude."""
    m_per_deg_lat, m_per_deg_lon = meter_per_degrees(np.nanmean(lat_arr))
    delta_lat_m = (lat_arr - point_lat) * m_per_deg_lat
    delta_lon_m = (lon_arr - point_lon) * m_per_deg_lon
    return (delta_lat_m**2 + delta_lon_m**2)**0.5
def distance_between_to_points_in_meters(point_1_latlon, point_2_latlon):
    """Planar-approximation distance in meters between two (lat, lon) points,
    scaled at the midpoint latitude."""
    mid_lat = (point_1_latlon[0] + point_2_latlon[0]) / 2
    m_per_deg_lat, m_per_deg_lon = meter_per_degrees(mid_lat)
    delta_lat_m = (point_1_latlon[0] - point_2_latlon[0]) * m_per_deg_lat
    delta_lon_m = (point_1_latlon[1] - point_2_latlon[1]) * m_per_deg_lon
    return ((delta_lat_m**2) + (delta_lon_m**2))**0.5
# Data Loading
def numpy_load_txt(filename_, delimiter_=",", format_=float, skip_head=0):
    """Load a delimited text file into a numpy array (thin genfromtxt wrapper)."""
    loaded_array = genfromtxt(filename_, delimiter=delimiter_, dtype=format_, skip_header=skip_head)
    return loaded_array
def open_csv_file(filename_, delimiter=',', skip_head=0, dtype='<U32'):
    """Load a CSV file as a numpy array of strings (or the given dtype)."""
    raw_data = genfromtxt(filename_, delimiter=delimiter, dtype=dtype, skip_header=skip_head)
    return np.array(raw_data)
def load_time_columns(filename_):
    """Load a station CSV whose first five columns are YYYY, MM, DD, HH, mm.

    Returns (header_, values_) where values_ columns are:
    [matplotlib datenum, month, weekday (Monday=0), fractional hour, data...].
    Non-numeric data cells become NaN.
    """
    ## user defined variables
    day_column_number = 2
    month_column_number = 1
    year_column_number = 0
    hour_column_number = 3
    minute_column_number = 4
    time_header = 'Time' #defining time header
    data_array = open_csv_file(filename_)
    # define arrays
    # data columns start at column 5; coerce to float, NaN on failure
    values_str = data_array[1:,5:]
    values_ = np.zeros((values_str.shape[0],values_str.shape[1]),dtype=float)
    for r_ in range(values_.shape[0]):
        for c_ in range(values_.shape[1]):
            try:
                values_[r_,c_] = float(values_str[r_,c_])
            except:
                values_[r_,c_] = np.nan
    # header from column 1 on: its first four entries will be renamed to match
    # the four derived time columns stacked in front of the data below
    header_ = data_array[0 ,1:]
    # defining time arrays
    time_days = np.zeros(data_array.shape[0] - 1, dtype=float)
    time_month = np.zeros(data_array.shape[0] - 1, dtype=int)
    time_weekday = np.zeros(data_array.shape[0] - 1, dtype=int)
    time_hour = np.zeros(data_array.shape[0] - 1)
    for r_ in range(data_array.shape[0] - 1):
        # matplotlib datenum built from the five time columns
        time_days[r_] = mdates.date2num(datetime.datetime(
            int(float(data_array[r_+1,year_column_number])),
            int(float(data_array[r_+1,month_column_number])),
            int(float(data_array[r_+1,day_column_number])),
            int(float(data_array[r_+1,hour_column_number])),
            int(float(data_array[r_+1,minute_column_number]))))
        time_month[r_] = int(float(data_array[r_+1,month_column_number]))
        time_weekday[r_] = datetime.datetime.weekday(mdates.num2date(time_days[r_]))
        # decimal hour of day (e.g. 13.5 for 13:30)
        time_hour[r_] = float(data_array[r_+1,hour_column_number]) + (float(data_array[r_+1,minute_column_number]) / 60)
    # compile names
    header_[0] = time_header
    header_[1] = 'Month'
    header_[2] = 'Day of week'
    header_[3] = 'Hour of day'
    # compile values
    values_ = np.column_stack((time_days, time_month, time_weekday, time_hour, values_))
    return header_, values_
def load_object(filename):
    """Unpickle and return the object stored in *filename*."""
    with open(filename, 'rb') as input_object:
        return pickle.load(input_object)
def read_one_line_from_text_file(filename_, line_number):
    """Return the line at 0-based *line_number* from a text file
    (including its trailing newline, if any).

    Raises ValueError if the file has no such line.
    BUGFIX: the file handle is now context-managed (it previously leaked on
    exceptions), and a short file raises a clear ValueError instead of
    UnboundLocalError.
    """
    with open(filename_) as file_:
        for i, line in enumerate(file_):
            if i == line_number:
                return line
    raise ValueError('line_number ' + str(line_number) + ' not found in ' + filename_)
# data saving/output
def save_time_variable_as_csv(output_filename, var_name, time_in_secs, var_values, time_format_output='%Y%m%d%H%M%S'):
    """Write a two-column CSV: formatted timestamp and variable value.

    time_in_secs: 1-D array of epoch seconds (formatted via
    time_seconds_to_str); var_values: 1-D array aligned with it. The header
    row is '<time_format_output>,<var_name>'. Shows a progress bar.
    """
    out_file = open(output_filename, 'w')
    # write header
    out_file.write(time_format_output)
    out_file.write(',')
    out_file.write(var_name)
    out_file.write('\n')
    for r_ in range(time_in_secs.shape[0]):
        p_progress_bar(r_, time_in_secs.shape[0])
        out_file.write(time_seconds_to_str(time_in_secs[r_], time_format_output))
        out_file.write(',' + str(var_values[r_]))
        out_file.write('\n')
    out_file.close()
def numpy_save_txt(filename_, array_, delimiter_=",", format_='%s'):
    """Save a numpy array as delimited text (thin np.savetxt wrapper)."""
    np.savetxt(filename_, array_, fmt=format_, delimiter=delimiter_)
def save_array_to_disk(header_with_units, time_in_seconds, values_in_floats, filename):
    """Save a time series to CSV with YYYY,MM,DD,HH,mm time columns.

    header_with_units: single string (1-D values) or list of strings (2-D);
    time_in_seconds: epoch seconds, expanded via time_seconds_to_5C_array;
    values_in_floats: values aligned with the time array.
    """
    #
    # 1-D values get a single data column; otherwise one per header entry
    if len(values_in_floats.shape) == 1:
        header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm', header_with_units]
    else:
        header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm']
        for parameter_ in header_with_units:
            header_to_print.append(parameter_)
    # create values block
    T_ = time_seconds_to_5C_array(time_in_seconds)
    P_ = np.column_stack((T_, values_in_floats))
    # change type to str
    P_str = np.array(P_, dtype='<U32')
    # join header with values
    P_final = np.row_stack((header_to_print, P_str))
    # save to hard drive
    numpy_save_txt(filename, P_final)
    print('final data saved to: ' + filename)
def save_HVF(header_, values_, filename):
    """Save a (header, values) pair to CSV with expanded time columns.

    Expects values_[:, 0] to be matplotlib datenums; columns 0-1 of header_
    and values_ are replaced by five YYYY,MM,DD,HH,mm columns. Rows with NaN
    timestamps keep empty time fields.
    """
    # check if all shapes match
    if len(header_) != values_.shape[1]:
        print('shape of header is not compatible with shape of values')
        return
    # NOTE(review): mdates.num2epoch was deprecated/removed in newer
    # matplotlib — confirm the pinned matplotlib version supports it
    time_in_seconds = mdates.num2epoch(values_[:, 0])
    header_with_units = header_[2:]
    values_in_floats = values_[:, 2:]
    header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm']
    for parameter_ in header_with_units:
        header_to_print.append(parameter_)
    # create values block
    T_ = np.zeros((time_in_seconds.shape[0], 5), dtype='<U32')
    for r_ in range(time_in_seconds.shape[0]):
        # NaN != NaN, so rows with invalid timestamps are skipped (left '')
        if time_in_seconds[r_] == time_in_seconds[r_]:
            T_[r_] = time.strftime("%Y,%m,%d,%H,%M", time.gmtime(time_in_seconds[r_])).split(',')
    P_ = np.column_stack((T_, values_in_floats))
    # change type to str
    P_str = np.array(P_, dtype='<U32')
    # join header with values
    P_final = np.row_stack((header_to_print, P_str))
    # save to hard drive
    numpy_save_txt(filename, P_final)
    print('final data saved to: ' + filename)
def save_simple_array_to_disk(header_list, values_array, filename_):
    """Stack header_list on top of values_array (as strings) and write
    the result to filename_ as CSV."""
    values_as_str = np.array(values_array, dtype='<U32')
    full_array = np.row_stack((header_list, values_as_str))
    numpy_save_txt(filename_, full_array)
    print('final data saved to: ' + filename_)
def save_array_as_is(array_, filename_):
    """Write the array to filename_ as comma-delimited text with '%s' formatting."""
    np.savetxt(filename_, array_, fmt='%s', delimiter=",")
def save_object(obj, filename):
    """Pickle *obj* to *filename* (highest protocol), overwriting any existing file."""
    with open(filename, 'wb') as output:
        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
# png data handeling
def store_array_to_png(array_, filename_out):
    """
    This function saves an array to a png file while keeping as much accuracy as possible with the lowest memory usage.

    The float array is encoded into the three uint8 channels of an RGB png:
    channel 0 carries the leading mantissa digit plus a sign flag (+100),
    channel 1 carries two further mantissa digits plus a NaN flag (+100),
    channel 2 carries the absolute exponent plus its sign flag (+100).
    Decode with read_png_to_array.
    WARNING: mutates array_ in place (NaNs are replaced with 0).
    :param array_: numpy array
    :param filename_out: string with full path
    :return: none
    """
    # shape
    rows_ = array_.shape[0]
    columns_ = array_.shape[1]
    # nan layer (NaN != NaN); flag value 100 added to channel 1
    array_nan = np.zeros((rows_, columns_), dtype='uint8')
    array_nan[array_ != array_] = 100
    # replace nans
    array_[array_ != array_] = 0
    # convert to all positive
    array_positive = np.abs(array_)
    # sign layer: 100 marks non-negative values
    array_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_sign[array_ >= 0] = 100
    # zeros array: 1 where the value is non-zero
    array_zeros = np.zeros((rows_, columns_), dtype='uint8')
    array_zeros[array_positive != 0] = 1
    # sub 1 array: 1 where 0 < |value| < 1 (these need an exponent shift)
    array_sub1 = np.zeros((rows_, columns_), dtype='uint8')
    array_sub1[array_positive<1] = 1
    array_sub1 = array_sub1 * array_zeros
    # power array: decimal exponent of each value
    exp_ = np.array(np.log10(array_positive), dtype=int)
    exp_[array_zeros==0] = 0
    # integral array: mantissa in [1, 10) (or [0.1, 1) for sub-1 values)
    array_integral = array_positive / 10 ** np.array(exp_, dtype=float)
    # array_layer_1: first mantissa digit (x10) plus the sign flag
    array_layer_1 = np.array(((array_sub1 * 9) + 1) * array_integral * 10, dtype='uint8') + array_sign
    # array_layer_2: next two mantissa digits
    array_layer_2 = np.array(((array_integral * ((array_sub1 * 9) + 1) * 10)
                              - np.array(array_integral * ((array_sub1 * 9) + 1) * 10, dtype='uint8')) * 100,
                             dtype='uint8')
    array_layer_2 = array_layer_2 + array_nan
    # power sign layer: adjust exponent for sub-1 values, flag non-negative with 100
    exp_ = exp_ - array_sub1
    array_power_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_power_sign[exp_ >= 0] = 100
    # array_layer_3: absolute exponent plus its sign flag
    array_layer_3 = np.abs(exp_) + array_power_sign
    # initialize out array
    out_array = np.zeros((rows_, columns_, 3), dtype='uint8')
    # dump into out array
    out_array[:, :, 0] = array_layer_1
    out_array[:, :, 1] = array_layer_2
    out_array[:, :, 2] = array_layer_3
    img_arr = PIL_Image.fromarray(out_array)
    img_arr.save(filename_out)
def read_png_to_array(filename_):
    """
    This functions converts pngs files created by "store_array_to_png" back to numpy arrays

    Decodes the three uint8 channels (mantissa digit + sign, two further
    mantissa digits + NaN flag, exponent + exponent sign) back into floats.
    :param filename_: string with full path name to png file created by store_array_to_png
    :return: numpy array
    """
    # read image into array
    img_arr = np.array(PIL_Image.open(filename_))
    # shape
    rows_ = img_arr.shape[0]
    columns_ = img_arr.shape[1]
    # nan array: channel 1 values >= 100 carry the NaN flag
    nan_array = np.zeros((rows_, columns_), dtype='uint8')
    nan_array[img_arr[:,:,1] >= 100] = 1
    # power array: exponent magnitude from channel 2, sign from its >=100 flag
    power_array_magnitude = ((img_arr[:,:,2]/100) - np.array(img_arr[:,:,2]/100, dtype='uint8') ) * 100
    sign_array = np.zeros((rows_, columns_)) - 1
    sign_array[img_arr[:,:,2] >= 100] = 1
    power_array = power_array_magnitude * sign_array
    # sign array: value sign from channel 0's >=100 flag
    sign_array = np.array(img_arr[:,:,0]/100, dtype=int)
    sign_array[sign_array == 0] = -1
    # unit array: leading mantissa digit from channel 0
    unit_array = np.array(img_arr[:,:,0]/10, dtype='uint8') - (np.array(img_arr[:,:,0]/100, dtype='uint8') * 10)
    # decimal array: remaining mantissa digits from channels 0 and 1
    decimal_array_1 = (img_arr[:,:,0]/10) - np.array(img_arr[:,:,0]/10, dtype='uint8')
    decimal_array_2 = ((img_arr[:,:,1]/100) - np.array(img_arr[:,:,1]/100, dtype='uint8') ) / 10
    # compute out array: sign * mantissa * 10^exponent
    out_array = (sign_array * (unit_array + decimal_array_1 + decimal_array_2)) * 10 ** power_array
    # flag nans
    out_array[nan_array==1]=np.nan
    return out_array
# sattelite data load
def load_OMI_NO2_monthly_data(filename_):
    """Load a monthly OMI NO2 ASCII-grid file.

    Reads the 6-line ESRI-style header (ncols, nrows, xllcorner, yllcorner,
    cellsize, nodata) — genfromtxt skips 7 lines, so the file is expected to
    have one more header line — builds matching lat/lon arrays, replaces
    nodata with NaN and scales by 1e13 to molec./cm^2.
    Returns (data, lat, lon) with the first and last rows trimmed.
    """
    # # [molec./cm-2]
    # filename_ = 'C:\\_input\\no2_201601.grd'
    # arr_NO2, lat_arr_NO2, lon_arr_NO2 = load_OMI_NO2_monthly_data(filename_)
    # [440: -820, 1650: 1960]
    data_array = genfromtxt(filename_, dtype=float, skip_header=7)
    # re-open to parse the header values line by line
    file_object = open(filename_,mode='r')
    ncols = int(file_object.readline().split()[-1])
    nrows = int(file_object.readline().split()[-1])
    xllcorner = float(file_object.readline().split()[-1])
    yllcorner = float(file_object.readline().split()[-1])
    cellsize = float(file_object.readline().split()[-1])
    nodata_value = float(file_object.readline().split()[-1])
    # version = file_object.readline().split()[-1]
    file_object.close()
    # latitudes run top-to-bottom (north first), longitudes left-to-right
    lat_arr = np.zeros((nrows, ncols), dtype=float)
    lon_arr = np.zeros((nrows, ncols), dtype=float)
    lat_series = np.linspace(yllcorner + (cellsize * nrows), yllcorner, nrows)
    lon_series = np.linspace(xllcorner, xllcorner + (cellsize * ncols), ncols)
    for r_ in range(nrows):
        lon_arr[r_, :] = lon_series
    for c_ in range(ncols):
        lat_arr[:, c_] = lat_series
    data_array[data_array==nodata_value] = np.nan
    # file stores values in units of 10^13 molec./cm^2
    data_array = data_array * 1e13
    return data_array[1:-1,:], lat_arr[1:-1,:], lon_arr[1:-1,:]
def load_OMI_HCHO_monthly_data(filename_):
    """Load a monthly OMI HCHO gridded file (molec./cm^2).

    The grid is fixed: 720 x 1440 cells of 0.25 degrees with lower-left
    corner at (-90, -180); latitudes run north to south. genfromtxt skips a
    7-line header. Returns (data, lat, lon) with first and last rows trimmed.
    """
    grid_rows = 720
    grid_cols = 1440
    cell_size = 0.25
    lat_origin = -90
    lon_origin = -180
    data_array = genfromtxt(filename_, dtype=float, skip_header=7)
    lat_series = np.linspace(lat_origin + (cell_size * grid_rows), lat_origin, grid_rows)
    lon_series = np.linspace(lon_origin, lon_origin + (cell_size * grid_cols), grid_cols)
    # every row holds lon_series, every column holds lat_series
    lon_arr, lat_arr = np.meshgrid(lon_series, lat_series)
    # file stores values in units of 10^15 molec./cm^2
    data_array = data_array * 1e15
    return data_array[1:-1, :], lat_arr[1:-1, :], lon_arr[1:-1, :]
def download_HIM8_AUS_ch3_500m(YYYYmmddHHMM_str):
    """Download the Himawari-8 500 m channel-3 reflectance for one time stamp
    via the NCI THREDDS OPeNDAP server, cropped to a fixed Australia window.

    YYYYmmddHHMM_str: 12-digit time string used to build the URL.
    Returns the 2-D 'channel_0003_brf' slice [13194:19491, 4442:14076].
    """
    # note: the '00' and '-P1S-...' literals below are joined by implicit
    # adjacent-string concatenation
    url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/' + \
           YYYYmmddHHMM_str[:4] + \
           '/' + \
           YYYYmmddHHMM_str[4:6] + \
           '/' + \
           YYYYmmddHHMM_str[6:8] + \
           '/' + \
           YYYYmmddHHMM_str[8:12] + \
           '/' + \
           YYYYmmddHHMM_str + '00' \
           '-P1S-ABOM_BRF_B03-PRJ_GEOS141_500-HIMAWARI8-AHI.nc'
    f_ = nc.Dataset(url_)
    # fixed crop indexes for the Australian region at 500 m resolution
    r_1 = 13194
    r_2 = 19491
    c_1 = 4442
    c_2 = 14076
    return f_.variables['channel_0003_brf'][0, r_1:r_2, c_1:c_2]
def download_HIM8_AUS_2000m(YYYYmmddHHMM_str, channel_number_str, print_=True):
    """Download one Himawari-8 2000 m channel for one time stamp via NCI
    THREDDS, cropped to a fixed Australia window.

    YYYYmmddHHMM_str: 12-digit time string; channel_number_str: two-digit
    channel ('01'-'16'). Returns the cropped 2-D array
    [3298:4873, 1110:3519] of the first variable whose name contains
    'channel'.
    """
    url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/' + \
           YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
           '/' + YYYYmmddHHMM_str[8:12] + \
           '/' + YYYYmmddHHMM_str + '00' + \
           '-P1S-ABOM_OBS_' \
           'B' + channel_number_str + \
           '-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
    if print_: print('downloading HIM_8', YYYYmmddHHMM_str, channel_number_str)
    f_ = nc.Dataset(url_)
    # fixed crop indexes for the Australian region at 2000 m resolution
    r_1 = 3298
    r_2 = 4873
    c_1 = 1110
    c_2 = 3519
    # the data variable name varies per channel; find the one containing 'channel'
    variable_name = ''
    for var_key in f_.variables.keys():
        if len(var_key.split('channel')) > 1:
            variable_name = var_key
            break
    return f_.variables[variable_name][0, r_1:r_2, c_1:c_2]
def download_HIM8_2000m(YYYYmmddHHMM_str, channel_number_str):
    """Download one full-disk Himawari-8 2000 m channel for one time stamp
    via NCI THREDDS (no cropping).

    YYYYmmddHHMM_str: 12-digit time string; channel_number_str: two-digit
    channel ('01'-'16'). Returns the full 2-D array of the first variable
    whose name contains 'channel'.
    """
    url_ = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/' + \
           YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
           '/' + YYYYmmddHHMM_str[8:12] + \
           '/' + YYYYmmddHHMM_str + '00' + \
           '-P1S-ABOM_OBS_' \
           'B' + channel_number_str + \
           '-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
    f_ = nc.Dataset(url_)
    # the data variable name varies per channel; find the one containing 'channel'
    variable_name = ''
    for var_key in f_.variables.keys():
        if len(var_key.split('channel')) > 1:
            variable_name = var_key
            break
    print('downloading variable:', variable_name)
    return f_.variables[variable_name][0, :,:]
def download_HIM8_AUS_truecolor_2000m(YYYYmmddHHMM_str):
    """Build an RGB true-colour uint8 image for the Australia crop at the
    given time (channels 03/02/01 mapped to R/G/B, scaled by 170)."""
    channel_blue = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '01')
    channel_green = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '02')
    channel_red = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '03')
    rgb_image = np.zeros((channel_blue.shape[0], channel_blue.shape[1], 3), dtype='uint8')
    for channel_index, channel_data in enumerate([channel_red, channel_green, channel_blue]):
        rgb_image[:, :, channel_index] = channel_data * 170
    return rgb_image
def download_HIM8_truecolor_2000m(YYYYmmddHHMM_str):
    """Build a full-disk RGB true-colour uint8 image at the given time
    (channels 03/02/01 mapped to R/G/B, scaled by 170)."""
    channel_blue = download_HIM8_2000m(YYYYmmddHHMM_str, '01')
    channel_green = download_HIM8_2000m(YYYYmmddHHMM_str, '02')
    channel_red = download_HIM8_2000m(YYYYmmddHHMM_str, '03')
    rgb_image = np.zeros((channel_blue.shape[0], channel_blue.shape[1], 3), dtype='uint8')
    for channel_index, channel_data in enumerate([channel_red, channel_green, channel_blue]):
        rgb_image[:, :, channel_index] = channel_data * 170
    return rgb_image
def download_lat_lon_arrays_HIM8_500():
    """Download the Himawari-8 500 m geolocation (lat, lon) arrays from the
    NCI ancillary file; values above 360 (off-disk fill) become NaN."""
    ancillary_url = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/ancillary/' \
                    '20150127000000-P1S-ABOM_GEOM_SENSOR-PRJ_GEOS141_500-HIMAWARI8-AHI.nc'
    lat_arr = download_big_nc_array_in_parts(ancillary_url, 'lat')
    lon_arr = download_big_nc_array_in_parts(ancillary_url, 'lon')
    lat_arr[lat_arr > 360] = np.nan
    lon_arr[lon_arr > 360] = np.nan
    return lat_arr, lon_arr
def download_lat_lon_arrays_HIM8_2000():
    """Download the Himawari-8 2000 m geolocation (lat, lon) arrays from the
    NCI ancillary file; values above 360 (off-disk fill) become NaN."""
    ancillary_url = 'http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/ancillary/' \
                    '20150127000000-P1S-ABOM_GEOM_SENSOR-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
    lat_arr = download_big_nc_array_in_parts(ancillary_url, 'lat')
    lon_arr = download_big_nc_array_in_parts(ancillary_url, 'lon')
    lat_arr[lat_arr > 360] = np.nan
    lon_arr[lon_arr > 360] = np.nan
    return lat_arr, lon_arr
def download_big_nc_array_in_parts(url_, variable_name, parts_=4):
    """Download a netCDF variable in sequential slabs to avoid one huge request.

    url_: OPeNDAP URL (or filename) accepted by nc.Dataset.
    variable_name: name of the variable to fetch.
    parts_: number of sequential slabs to request.
    Splits along axis 0 for 1-D variables and along axis 1 otherwise.
    Returns a numpy array, or None on error (scalar variable, or too small
    to split into parts_ pieces).

    IMPROVEMENTS over the previous version: the per-dimensionality branches
    (1-D to 4-D) are unified via axis slicing, which also lifts the old
    4-dimension limit, and the dataset handle is now closed.
    """
    f_ = nc.Dataset(url_)
    var_shape = f_.variables[variable_name].shape
    print('downloading variable', variable_name, 'with shape:', var_shape)
    if len(var_shape) == 0:
        print('ERROR! variable is not an array')
        f_.close()
        return None
    if len(var_shape) == 1 and var_shape[0] == 1:
        print('ERROR! variable is a scalar')
        f_.close()
        return None
    # 1-D variables split along their only axis; multi-dimensional ones
    # split along axis 1 (the row axis for the satellite files used here)
    split_axis = 0 if len(var_shape) == 1 else 1
    rows_per_part = int(var_shape[split_axis] / parts_)
    if rows_per_part == 0:
        print('ERROR! variable size is too small to be divided, should be downloaded directly')
        f_.close()
        return None
    output_array = np.zeros(var_shape)
    slicer = [slice(None)] * len(var_shape)
    for part_ in range(parts_ - 1):
        slicer[split_axis] = slice(int(part_ * rows_per_part), int((part_ + 1) * rows_per_part))
        output_array[tuple(slicer)] = f_.variables[variable_name][tuple(slicer)]
    # the final slab absorbs any remainder rows
    slicer[split_axis] = slice(int((parts_ - 1) * rows_per_part), None)
    output_array[tuple(slicer)] = f_.variables[variable_name][tuple(slicer)]
    f_.close()
    return output_array
def get_himawari8_2000m_NCI(YYYYmmddHHMM_str, channel_number, output_format='png',
                            output_path='/g/k10/la6753/data/', row_start=0, row_stop=5500, col_start=0,
                            col_stop=5500):
    """
    gets array from himawari-8 netcdf files and extracts only the indicated channel at the indicated time. saves to output_path
    :param YYYYmmddHHMM_str: string with the time in four digits for year, two digits for months...
    :param channel_number: int or float with the number of the channel ('01'-'16')
    :param output_format: string with either 'png' or 'numpy'. If png the array will be saved used store_array_to_png, otherwise numpy.save will be used
    :param output_path: string with the path, or full filename to be used to save the file
    :param row_start: int with the row number to start the crop
    :param row_stop: int with the row number to stop the crop
    :param col_start: int with the coloumn number to start the crop
    :param col_stop: int with the coloumn number to stop the crop
    :return: None
    """
    # zero-pad the channel number to two digits, e.g. 3 -> '03'
    channel_number_str = str(int(channel_number)).zfill(2)
    # note: adjacent string literals in this expression are joined implicitly
    filename_ = '/g/data/rr5/satellite/obs/himawari8/FLDK/' + \
                YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
                '/' + YYYYmmddHHMM_str[8:12] + \
                '/' + YYYYmmddHHMM_str + '00' + \
                '-P1S-ABOM_OBS_' \
                'B' + channel_number_str + \
                '-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
    if os.path.exists(filename_):
        f_ = nc.Dataset(filename_)
        # the data variable name varies per channel; find the one containing 'channel'
        variable_name = ''
        for var_key in f_.variables.keys():
            if len(var_key.split('channel')) > 1:
                variable_name = var_key
                break
        array_ = f_.variables[variable_name][0, row_start:row_stop, col_start:col_stop]
        # output_path ending in a separator is treated as a directory and a
        # default filename is built; otherwise it is the full output filename
        if output_path[-1] == '/' or output_path[-1] == '\\':
            if output_format == 'png':
                output_filename = output_path + 'him_2000m_ch' + channel_number_str + '_' + YYYYmmddHHMM_str + '.png'
            else:
                output_filename = output_path + 'him_2000m_ch' + channel_number_str + '_' + YYYYmmddHHMM_str + '.npy'
        else:
            output_filename = output_path
        if output_format == 'png':
            store_array_to_png(array_, output_filename)
        else:
            np.save(output_filename, array_)
    else:
        print('File not available for time stamp:', YYYYmmddHHMM_str)
# ERA5
def create_virtual_sondes_from_ERA5(time_stamp_sec, lat_lon_tuple, era5_file_levels_ncFile, era5_file_surface_ncFile,
                                    max_time_delta_sec=21600, show_prints=True):
    """Build a virtual sounding from ERA5 pressure-level and surface files.

    time_stamp_sec: epoch seconds of the requested sonde time.
    lat_lon_tuple: (lat, lon) of the requested location.
    era5_file_*_ncFile: path (opened and closed here) or an open nc.Dataset.
    max_time_delta_sec: maximum allowed gap to the nearest ERA5 time step.
    Returns (P_array, H_array, T_array, Td_array, U_array, V_array) with
    only levels at pressures <= surface pressure kept and log-pressure
    interpolated surface values appended as the last element, or None if no
    ERA5 time step is close enough.
    """
    close_level_file=False
    close_surface_file=False
    # accept either open datasets or paths; only close what was opened here
    if type(era5_file_levels_ncFile) == str:
        era5_file_levels = nc.Dataset(era5_file_levels_ncFile)
        close_level_file = True
    else:
        era5_file_levels = era5_file_levels_ncFile
    if type(era5_file_surface_ncFile) == str:
        era5_file_surface = nc.Dataset(era5_file_surface_ncFile)
        close_surface_file = True
    else:
        era5_file_surface = era5_file_surface_ncFile
    time_era5_levels_sec = time_era5_to_seconds(era5_file_levels.variables['time'][:])
    time_era5_surface_sec = time_era5_to_seconds(era5_file_surface.variables['time'][:])
    r_era5_levels_1 = time_to_row_sec(time_era5_levels_sec, time_stamp_sec)
    r_era5_surface_1 = time_to_row_sec(time_era5_surface_sec, time_stamp_sec)
    # bail out if the closest ERA5 time step is too far from the request
    if np.abs(time_era5_levels_sec[r_era5_levels_1] - time_stamp_sec) > max_time_delta_sec:
        if show_prints: print('error time gap is too large', )
        return None
    # find row and column for the lat lon
    lat_index, lon_index = find_index_from_lat_lon(era5_file_levels.variables['latitude'][:].data,
                                                   era5_file_levels.variables['longitude'][:].data,
                                                   lat_lon_tuple[0], lat_lon_tuple[1])
    if show_prints: print('creating input arrays')
    t_profile = kelvin_to_celsius(era5_file_levels.variables['t'][r_era5_levels_1, :, lat_index, lon_index].data)
    if show_prints: print('created t_array')
    td_profile = calculate_dewpoint_from_T_RH(t_profile, era5_file_levels.variables['r'][r_era5_levels_1, :, lat_index, lon_index].data)
    if show_prints: print('created Td_array')
    # geopotential to geometric height
    h_profile = era5_file_levels.variables['z'][r_era5_levels_1, :, lat_index, lon_index].data / gravity_
    if show_prints: print('created z_array')
    u_profile = era5_file_levels.variables['u'][r_era5_levels_1, :, lat_index, lon_index].data
    if show_prints: print('created u_array')
    v_profile = era5_file_levels.variables['v'][r_era5_levels_1, :, lat_index, lon_index].data
    if show_prints: print('created v_array')
    p_profile = era5_file_levels.variables['level'][:].data # hPa
    if show_prints: print('created p_array')
    surface_p = era5_file_surface.variables['sp'][r_era5_surface_1, lat_index, lon_index] / 100 # / 100 to convert Pa to hPa
    if show_prints: print('created sp_array')
    # trim profiles from surface to top
    # find which levels should be included
    # counts leading levels with pressure <= surface pressure (levels are
    # assumed ordered top-down, i.e. increasing pressure)
    levels_total = 0
    for i_ in range(p_profile.shape[0]):
        if p_profile[i_] > surface_p:
            break
        levels_total += 1
    ####################################### find extrapolations
    # interpolate each quantity to the surface pressure in log-p space
    surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
    surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
    surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
    surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
    surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
    # create temp arrays
    # one extra slot at the end for the interpolated surface values
    T_array = np.zeros(levels_total + 1, dtype=float)
    Td_array = np.zeros(levels_total + 1, dtype=float)
    Q_array = np.zeros(levels_total + 1, dtype=float)
    U_array = np.zeros(levels_total + 1, dtype=float)
    V_array = np.zeros(levels_total + 1, dtype=float)
    H_array = np.zeros(levels_total + 1, dtype=float)
    P_array = np.zeros(levels_total + 1, dtype=float)
    T_array[:levels_total] = t_profile[:levels_total]
    Td_array[:levels_total] = td_profile[:levels_total]
    U_array[:levels_total] = u_profile[:levels_total]
    V_array[:levels_total] = v_profile[:levels_total]
    H_array[:levels_total] = h_profile[:levels_total]
    P_array[:levels_total] = p_profile[:levels_total]
    T_array[-1] = surface_t
    Td_array[-1] = surface_td
    U_array[-1] = surface_u
    V_array[-1] = surface_v
    H_array[-1] = surface_h
    P_array[-1] = surface_p
    if close_level_file:
        era5_file_levels.close()
    if close_surface_file:
        era5_file_surface.close()
    return P_array, H_array, T_array, Td_array, U_array, V_array
def era5_get_surface_interpolated_vars(era5_file_levels_ncFile, era5_file_surface_ncFile, show_prints=True,
                                       time_start_str_YYYYmmDDHHMM=None, time_stop_str_YYYYmmDDHHMM=None):
    """
    Interpolate ERA5 pressure-level fields (t, td, u, v, z/g, q) to the surface
    pressure of every grid point and time step, using linear interpolation in
    log-pressure.

    :param era5_file_levels_ncFile: path or open nc.Dataset with level variables t, r, z, u, v, q, level, time
    :param era5_file_surface_ncFile: path or open nc.Dataset with the surface pressure variable sp
    :param show_prints: if True, print progress messages
    :param time_start_str_YYYYmmDDHHMM: optional period start ('YYYYmmDDHHMM' string) used to trim the series
    :param time_stop_str_YYYYmmDDHHMM: optional period stop ('YYYYmmDDHHMM' string) used to trim the series
    :return: (surface_t, surface_td, surface_u, surface_v, surface_h, surface_q, time_era5_sec)
             each surface array has shape (time, lat, lon)
    """
    close_level_file = False
    close_surface_file = False
    # accept either a filename or an already-open Dataset; only close what we opened
    if type(era5_file_levels_ncFile) == str:
        era5_file_levels = nc.Dataset(era5_file_levels_ncFile)
        close_level_file = True
    else:
        era5_file_levels = era5_file_levels_ncFile
    if type(era5_file_surface_ncFile) == str:
        era5_file_surface = nc.Dataset(era5_file_surface_ncFile)
        close_surface_file = True
    else:
        era5_file_surface = era5_file_surface_ncFile
    time_era5_levels_sec = time_era5_to_seconds(era5_file_levels.variables['time'][:])
    # trim time
    r_1 = 0
    r_2 = -1  # NOTE(review): the default slice excludes the last time step — confirm this is intended
    if time_start_str_YYYYmmDDHHMM is not None:
        r_1 = time_to_row_str(time_era5_levels_sec, time_start_str_YYYYmmDDHHMM)
    if time_stop_str_YYYYmmDDHHMM is not None:
        r_2 = time_to_row_str(time_era5_levels_sec, time_stop_str_YYYYmmDDHHMM)
    time_era5_sec = time_era5_levels_sec[r_1:r_2]
    if show_prints: print('creating input arrays')
    # only levels from index 10 onward are used — presumably drops the
    # uppermost pressure levels; confirm against the file's level ordering
    t_profile = kelvin_to_celsius(era5_file_levels.variables['t'][r_1:r_2, 10:, :, :].data)
    if show_prints: print('created t_array')
    td_profile = calculate_dewpoint_from_T_RH(t_profile, era5_file_levels.variables['r'][r_1:r_2, 10:, :, :].data)
    if show_prints: print('created Td_array')
    h_profile = era5_file_levels.variables['z'][r_1:r_2, 10:, :, :].data / gravity_
    if show_prints: print('created z_array')
    u_profile = era5_file_levels.variables['u'][r_1:r_2, 10:, :, :].data
    if show_prints: print('created u_array')
    v_profile = era5_file_levels.variables['v'][r_1:r_2, 10:, :, :].data
    if show_prints: print('created v_array')
    p_profile = era5_file_levels.variables['level'][10:].data  # hPa
    if show_prints: print('created p_array')
    surface_p = era5_file_surface.variables['sp'][r_1:r_2, :, :] / 100  # / 100 to convert Pa to hPa
    if show_prints: print('created sp_array')
    q_profile = era5_file_levels.variables['q'][r_1:r_2, 10:, :, :].data
    if show_prints: print('created q_array')
    ####################################### find extrapolations
    surface_t = np.zeros((surface_p.shape), dtype=float)
    surface_td = np.zeros((surface_p.shape), dtype=float)
    surface_u = np.zeros((surface_p.shape), dtype=float)
    surface_v = np.zeros((surface_p.shape), dtype=float)
    surface_h = np.zeros((surface_p.shape), dtype=float)
    surface_q = np.zeros((surface_p.shape), dtype=float)
    if show_prints: print('starting interpolation of every point in time')
    # hoist the loop-invariant log of the pressure levels (it was recomputed
    # six times per grid point per time step); likewise compute the log of the
    # surface pressure once per grid point instead of once per variable
    log_p_profile = np.log(p_profile)
    for r_ in range(time_era5_sec.shape[0]):
        p_progress_bar(r_, time_era5_sec.shape[0])
        for lat_ in range(surface_p.shape[1]):
            for lon_ in range(surface_p.shape[2]):
                log_sp = np.log(surface_p[r_, lat_, lon_])
                surface_t [r_, lat_, lon_] = np.interp(log_sp, log_p_profile, t_profile [r_, :, lat_, lon_])
                surface_td[r_, lat_, lon_] = np.interp(log_sp, log_p_profile, td_profile[r_, :, lat_, lon_])
                surface_u [r_, lat_, lon_] = np.interp(log_sp, log_p_profile, u_profile [r_, :, lat_, lon_])
                surface_v [r_, lat_, lon_] = np.interp(log_sp, log_p_profile, v_profile [r_, :, lat_, lon_])
                surface_h [r_, lat_, lon_] = np.interp(log_sp, log_p_profile, h_profile [r_, :, lat_, lon_])
                surface_q [r_, lat_, lon_] = np.interp(log_sp, log_p_profile, q_profile [r_, :, lat_, lon_])
    if close_level_file:
        era5_file_levels.close()
    if close_surface_file:
        era5_file_surface.close()
    return surface_t, surface_td, surface_u, surface_v, surface_h, surface_q, time_era5_sec
# HYSPLIT
def hysplit_load_freq_endpoints(filename_, number_of_hours):
    """
    Load trajectory endpoints from one HYSPLIT frequency-endpoints text file.

    :param filename_: path to the HYSPLIT endpoints text file
    :param number_of_hours: hourly endpoint rows per trajectory minus one
                            (number_of_hours + 1 rows are read after each header)
    :return: float array of shape (rows, 12); columns 0-10 come from the
             endpoint columns starting at each line's third token, column 11 is
             a 0-based trajectory counter added while parsing
    """
    # context manager guarantees the handle is closed (was a bare open/close pair)
    with open(filename_, 'r') as file_obj:
        line_list = file_obj.readlines()
    file_traj_list = []
    traj_number = -1
    for line_inx, line_str in enumerate(line_list):
        # this exact header line marks the start of a new trajectory block
        if line_str == ' 1 PRESSURE\n':
            traj_number += 1
            for r_ in range(number_of_hours + 1):
                new_line_list = line_list[line_inx + r_ + 1].split()
                new_line_list.append(traj_number)
                file_traj_list.append(new_line_list)
    arr_ = np.zeros((len(file_traj_list), 12), dtype=float)
    for r_ in range(len(file_traj_list)):
        for c_ in range(12):
            arr_[r_, c_] = file_traj_list[r_][c_ + 2]
    return arr_
def hysplit_load_freq_endpoints_all(file_list):
    """
    Load trajectory endpoints from several HYSPLIT frequency-endpoints files,
    assuming 24-hour trajectories (25 endpoint rows after each header line).

    :param file_list: list of HYSPLIT endpoints text file paths
    :return: float array of shape (rows, 11) with the endpoint columns
             starting at each line's third token
    """
    file_traj_list = []
    for filename_ in file_list:
        # context manager guarantees the handle is closed (was a bare open/close pair)
        with open(filename_, 'r') as file_obj:
            line_list = file_obj.readlines()
        for line_inx, line_str in enumerate(line_list):
            # this exact header line marks the start of a new trajectory block
            if line_str == ' 1 PRESSURE\n':
                for r_ in range(25):
                    file_traj_list.append(line_list[line_inx + r_ + 1].split())
    arr_ = np.zeros((len(file_traj_list), 11), dtype=float)
    for r_ in range(len(file_traj_list)):
        for c_ in range(11):
            arr_[r_, c_] = file_traj_list[r_][c_ + 2]
    return arr_
def calculate_mean_time(file_list, lat_tuple, lon_tuple):
    """
    For each HYSPLIT endpoints file, compute the percentage of trajectories
    that enter the lat/lon box, and the mean/std of the trajectory age value
    (endpoint column 6) at the first endpoint inside the box.

    :param file_list: list of HYSPLIT frequency-endpoints file paths (24 h trajectories)
    :param lat_tuple: (lat_min, lat_max) of the target box
    :param lon_tuple: (lon_min, lon_max) of the target box
    :return: (probability list [%], mean age array, hit-count list,
              trajectory-total list, std age array) — one entry per file
    """
    hit_counter_list = []
    total_counter_list = []
    month_mean_time = []
    month_std_time = []
    month_probability_list = []
    for filename_ in file_list:
        endpoints = hysplit_load_freq_endpoints(filename_, 24)
        last_traj = int(np.max(endpoints[:, -1]))
        hits = 0
        hit_ages = []
        for traj_id in range(last_traj + 1):
            # find the first endpoint of this trajectory that falls inside the box
            for row in endpoints:
                if row[-1] == traj_id:
                    inside_lat = lat_tuple[0] < row[7] < lat_tuple[1]
                    inside_lon = lon_tuple[0] < row[8] < lon_tuple[1]
                    if inside_lat and inside_lon:
                        hits += 1
                        hit_ages.append(row[6])
                        break
        hit_counter_list.append(hits)
        total_counter_list.append(last_traj)
        month_probability_list.append(100 * hits / last_traj)
        month_mean_time.append(np.mean(hit_ages))
        month_std_time.append(np.std(hit_ages))
    return month_probability_list, np.array(month_mean_time), hit_counter_list, total_counter_list, np.array(month_std_time)
# BOM
def Lidar_compile_and_convert_txt_to_dict(main_folder_path):
    """
    Walk main_folder_path (only sub-folders whose name starts with '2'),
    convert every LIDAR txt file found two levels down, and stack the results
    into a dict with 'time', 'range' and the six channel arrays.

    :param main_folder_path: root folder of the LIDAR txt tree (trailing separator expected)
    :return: dict with keys 'time', 'range', 'ch0_pr2', 'ch0_mrg',
             'ch1_pr2', 'ch1_mrg', 'ch2_pr2', 'ch2_mrg'
    """
    # build the complete list of minute files
    filename_list = []
    for sub_folder in next(os.walk(main_folder_path))[1]:
        if sub_folder[0] == '2':
            for sub_sub_folder in next(os.walk(main_folder_path + sub_folder + '\\'))[1]:
                path_sub_sub_sub = main_folder_path + sub_folder + '\\' + sub_sub_folder + '\\'
                filename_list.extend(sorted(glob.glob(str(path_sub_sub_sub + '*.*'))))
    total_files = len(filename_list)
    print(' number of files to compile:', str(total_files))
    # the first file defines the range dimension
    first_time, first_data = Lidar_convert_txt_to_array(filename_list[0])
    range_shape = first_data.shape[0]
    time_array = np.zeros(total_files)
    range_array = first_data[:, 0]
    channel_keys = ('ch0_pr2', 'ch0_mrg', 'ch1_pr2', 'ch1_mrg', 'ch2_pr2', 'ch2_mrg')
    channel_arrays = {key_: np.zeros((total_files, range_shape), dtype=float) for key_ in channel_keys}
    print('arrays initialized')
    # populate arrays (the first file is converted again, matching one-pass order)
    for i_, filename_ in enumerate(filename_list):
        p_progress(i_, total_files)
        time_stamp, data_ = Lidar_convert_txt_to_array(filename_)
        time_array[i_] = time_stamp
        for col_, key_ in enumerate(channel_keys):
            channel_arrays[key_][i_, :] = data_[:, col_ + 1]
    # move to dict
    output_dict = {'time': time_array, 'range': range_array}
    for key_ in channel_keys:
        output_dict[key_] = channel_arrays[key_]
    return output_dict
def Lidar_convert_txt_to_array(filename_):
    """
    Read one LIDAR txt file and return its timestamp plus a 7-column array.

    :param filename_: path whose characters [-25:-6] encode YYYY-mm-dd_HH-MM-SS
    :return: (epoch seconds parsed from the filename,
              array with columns [altitude, ch0_pr2, ch0_mrg, ch1_pr2, ch1_mrg, ch2_pr2, ch2_mrg])
    """
    # the timestamp is encoded in the filename
    time_stamp_seconds = time_str_to_seconds(filename_[-25:-6], '%Y-%m-%d_%H-%M-%S')
    # read the data into an array (133 header lines are metadata)
    raw_ = genfromtxt(filename_, dtype=float, delimiter='\t', skip_header=133)
    # keep a single altitude column and drop the duplicated ones
    kept_columns = (0, 1, 2, 4, 5, 7, 8)
    data_array_out = np.zeros((raw_.shape[0], 7), dtype=float)
    for out_col, in_col in enumerate(kept_columns):
        data_array_out[:, out_col] = raw_[:, in_col]
    return time_stamp_seconds, data_array_out
def compile_AWAP_precip_datafiles(file_list):
    """
    Stack a list of AWAP precipitation grid files into one (time, lat, lon) array.

    :param file_list: list of AWAP precipitation file paths, one per time step
    :return: (data array, epoch-seconds time array, latitude series, longitude series)
    """
    # the first file defines the grid shape
    print('loading file: ', file_list[0])
    first_grid, first_time_sec = load_AWAP_data(file_list[0])
    # fixed AWAP grid: 0.05 degree spacing, latitude stored north-to-south
    series_lat = np.arange(-44.5, -9.95, 0.05)[::-1]
    series_lon = np.arange(112, 156.29, 0.05)
    output_array_time = np.zeros(len(file_list), dtype=float)
    output_array = np.zeros((len(file_list), first_grid.shape[0], first_grid.shape[1]), dtype=float)
    output_array[0, :, :] = first_grid
    output_array_time[0] = first_time_sec
    # remaining files fill rows 1..n-1
    for t_, filename_ in enumerate(file_list[1:], start=1):
        print('loading file: ', filename_)
        output_array[t_, :, :], output_array_time[t_] = load_AWAP_data(filename_)
    return output_array, output_array_time, series_lat, series_lon
def load_AWAP_data(filename_):
    """
    Load one AWAP precipitation grid file.

    :param filename_: path whose basename starts with the date as YYYYmmdd
    :return: (precipitation array, epoch seconds of the start date from the filename)
    """
    # the first 8 characters of the basename encode the start date
    start_date_sec = time_str_to_seconds(filename_.split('\\')[-1][:8], '%Y%m%d')
    # 6 header and 18 footer lines are grid metadata, not data
    precip_grid = np.genfromtxt(filename_, float, skip_header=6, skip_footer=18)
    return precip_grid, start_date_sec
def get_means_from_filelist(file_list, lat_lon_ar):
    """
    Extract a box-mean precipitation series plus seven point series from a list
    of AWAP precipitation files, and save the result to a hard-coded csv path.

    :param file_list: list of AWAP precipitation file paths, one per time step
    :param lat_lon_ar: array with [lon, lat] rows for the seven point series
    :return: array of shape (len(file_list), 9):
             column 0 time [epoch sec], column 1 box mean, columns 2-8 point values
    """
    # lat_lon_points_list = [ 147.8,
    #                         149,
    #                         -36.8,
    #                         -35.4]
    # box domain indexes (fixed rows/columns on the AWAP grid)
    index_c = [716, 740]
    index_r = [508, 536]
    series_lat = np.arange(-44.5, -9.95, 0.05)[::-1]
    series_lon = np.arange(112,156.3,0.05)
    lat_index_list, lon_index_list = find_index_from_lat_lon(series_lat, series_lon, lat_lon_ar[:,1], lat_lon_ar[:,0])
    time_secs_list = []
    # size the output by the actual number of files (was hard coded to 277 rows,
    # which crashed for longer lists and left zero padding for shorter ones)
    precip_array = np.zeros((len(file_list), 9), dtype=float)
    for r_, filename_ in enumerate(file_list):
        print('loading file: ', filename_)
        arr_precip, start_date_sec = load_AWAP_data(filename_)
        time_secs_list.append(start_date_sec)
        precip_array[r_, 0] = start_date_sec
        precip_array[r_, 1] = np.mean(arr_precip[index_r[0]:index_r[1]+1, index_c[0]:index_c[1]+1])
        for i_ in range(2,9):
            precip_array[r_, i_] = arr_precip[lat_index_list[i_-2],lon_index_list[i_-2]]
    save_array_to_disk(['box mean precip [mm]','1 precip [mm]','2 precip [mm]','3 precip [mm]',
                        '4 precip [mm]','5 precip [mm]','6 precip [mm]','7 precip [mm]'],
                       precip_array[:,0], precip_array[:,1:], 'C:\\_output\\test_fimi_2.csv')
    # save_HVF(['box','1','2','3','4','5','6','7'], precip_array, 'C:\\_output\\test_fimi_1.csv')
    print("done")
    return precip_array
def compile_BASTA_days_and_save_figure(directory_where_nc_file_are):
    """
    Compile BASTA radar data per day and save one raw-reflectivity figure per day.

    :param directory_where_nc_file_are: folder with BASTA .nc files; the date is
                                        read from characters [-18:-10] of each
                                        file name (YYYYmmdd)
    :return: None; one .png per day is written into the input folder
    """
    # compile BASTA data per day and save plot (per day)
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    # directory_where_nc_file_are = '/home/luis/Data/BASTA/L0/12m5/'
    path_input = directory_where_nc_file_are
    # label built from the last three folder names of the input path
    file_label = path_input.split('/')[-4] + '_' + path_input.split('/')[-3] + '_' + path_input.split('/')[-2] + '_'
    file_list_all = sorted(glob.glob(str(path_input + '/*.nc')))
    first_day_str = file_list_all[0][-18:-10]
    last_day_str = file_list_all[-1][-18:-10]
    first_day_int = time_seconds_to_days(time_str_to_seconds(first_day_str,'%Y%m%d'))
    last_day_int = time_seconds_to_days(time_str_to_seconds(last_day_str,'%Y%m%d'))
    total_number_of_days = last_day_int - first_day_int
    print('The data in the folder encompasses', total_number_of_days, 'days')
    days_list_int = np.arange(first_day_int, last_day_int + 1)
    days_list_str = time_seconds_to_str(time_days_to_seconds(days_list_int),'%Y%m%d')
    for day_str in days_list_str:
        print('-|' * 20)
        file_list_day = sorted(glob.glob(str(path_input + file_label + day_str + '*.nc')))
        print('Compiling day', day_str, len(file_list_day), 'files found for this day.')
        if len(file_list_day) > 0:
            # first file of the day seeds the compiled arrays
            filename_ = file_list_day[0]
            print('loading file:', filename_)
            netcdf_file_object = nc.Dataset(filename_, 'r')
            # variable_names = sorted(netcdf_file_object.variables.keys())
            time_raw = netcdf_file_object.variables['time'][:].copy()
            # the time variable's units string carries the file's epoch; raw
            # values are offsets from it
            file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                        time_format_basta)
            compiled_time_days = time_seconds_to_days(np.array(time_raw, dtype=int) + file_first_time_stamp)
            compiled_raw_reflectivity_array = netcdf_file_object.variables['raw_reflectivity'][:].copy()
            compiled_range_array = netcdf_file_object.variables['range'][:].copy()
            netcdf_file_object.close()
            if len(file_list_day) > 1:
                # stack the remaining files of the day below the first one
                for filename_ in file_list_day[1:]:
                    print('loading file:', filename_)
                    netcdf_file_object = nc.Dataset(filename_, 'r')
                    time_raw = netcdf_file_object.variables['time'][:].copy()
                    file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                                time_format_basta)
                    time_days = time_seconds_to_days(np.array(time_raw, dtype = int) + file_first_time_stamp)
                    compiled_time_days = np.append(compiled_time_days, time_days)
                    raw_reflectivity_array = netcdf_file_object.variables['raw_reflectivity'][:].copy()
                    compiled_raw_reflectivity_array = np.vstack((compiled_raw_reflectivity_array,
                                                                 raw_reflectivity_array))
                    netcdf_file_object.close()
            figure_output_name = path_input + file_label + day_str + '.png'
            print('saving figure to:', figure_output_name)
            # range is converted from m to km for the y axis
            p_arr_vectorized_2(compiled_raw_reflectivity_array, compiled_time_days, compiled_range_array/1000,
                               cmap_=default_cm, figsize_=(12, 8), vmin_=80, vmax_=140,
                               cbar_label='Raw Reflectivity dB', x_header='UTC',y_header='Range AGL [km]',
                               figure_filename=figure_output_name,
                               time_format_ = '%H')
def compile_BASTA_into_one_file(directory_where_nc_file_are):
    """
    Concatenate all BASTA .nc files in a folder into one netcdf file.

    The first file is copied to serve as the output container; each later file
    is appended along the time dimension. The output name is the first file's
    name extended with the last timestamp found in the folder.

    NOTE(review): appending via `variable[row_start:row_end] = ...` presumably
    requires 'time' to be an unlimited dimension in the copied file — confirm.

    :param directory_where_nc_file_are: folder with the per-file BASTA .nc files
    :return: None; a combined .nc file is written next to the inputs
    """
    # compile BASTA data into one netcdf file
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    # directory_where_nc_file_are = '/home/luis/Data/BASTA/L0/12m5/'
    path_input = directory_where_nc_file_are
    file_list_all = sorted(glob.glob(str(path_input + '/*.nc')))
    # first_day_str = file_list_all[0][-18:-10]
    # last_day_str = file_list_all[-1][-18:-10]
    # first_day_int = time_seconds_to_days(time_str_to_seconds(first_day_str,'%Y%m%d'))
    # last_day_int = time_seconds_to_days(time_str_to_seconds(last_day_str,'%Y%m%d'))
    # days_list_int = np.arange(first_day_int, last_day_int + 1)
    # create copy of first file
    # read the last time stamp in the folder first (used to name the output file)
    netcdf_file_object = nc.Dataset(file_list_all[-1], 'r')
    last_second_raw = netcdf_file_object.variables['time'][:][-1]
    file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                time_format_basta)
    netcdf_file_object.close()
    last_second_epoc = last_second_raw + file_first_time_stamp
    last_time_str = time_seconds_to_str(last_second_epoc, '%Y%m%d_%H%M%S')
    output_filename = file_list_all[0][:-3] + '_' + last_time_str + '.nc'
    shutil.copyfile(file_list_all[0], output_filename)
    print('Created output file with name:', output_filename)
    # open output file for appending data
    netcdf_output_file_object = nc.Dataset(output_filename, 'a')
    file_first_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_output_file_object.variables['time'].units,
                                                             time_format_basta)
    variable_names = sorted(netcdf_output_file_object.variables.keys())
    # create references to variables in output file
    variable_objects_dict = {}
    for var_name in variable_names:
        variable_objects_dict[var_name] = netcdf_output_file_object.variables[var_name]
    for filename_ in file_list_all[1:]:
        print('-' * 5)
        print('loading file:', filename_)
        # open file
        netcdf_file_object = nc.Dataset(filename_, 'r')
        # create file's time series
        file_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                           time_format_basta)
        time_raw = netcdf_file_object.variables['time'][:].copy()
        time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
        # rows are appended after the data already present in the output file
        row_start = variable_objects_dict['time'].shape[0]
        row_end = time_raw.shape[0] + row_start
        # append time array
        # time is re-expressed relative to the output file's own epoch
        variable_objects_dict['time'][row_start:row_end] = time_seconds_epoc - file_first_time_stamp_seconds_epoc
        # append raw_reflectivity array
        variable_objects_dict['raw_reflectivity'][row_start:row_end] = \
            netcdf_file_object.variables['raw_reflectivity'][:].copy()
        # append raw_velocity array
        variable_objects_dict['raw_velocity'][row_start:row_end] = \
            netcdf_file_object.variables['raw_velocity'][:].copy()
        # append all other variables that only time dependent
        for var_name in variable_names:
            if var_name != 'time' and var_name != 'range' and \
                    var_name != 'raw_reflectivity' and var_name != 'raw_velocity':
                if len(netcdf_file_object.variables[var_name].shape) == 1:
                    variable_objects_dict[var_name][row_start:row_end] = \
                        netcdf_file_object.variables[var_name][:].copy()
        netcdf_file_object.close()
    netcdf_output_file_object.close()
    print('done')
def load_BASTA_data_from_netcdf_to_arrays(filename_):
    """
    Open one compiled BASTA netcdf file and return its main fields.

    :param filename_: path to the BASTA .nc file
    :return: (raw_reflectivity variable, raw_velocity variable, range array,
              time in epoch seconds, time in epoch days)

    NOTE(review): raw_reflectivity and raw_velocity are returned as netCDF
    variable objects (not copied into memory — the `[:].copy()` is commented
    out), and the dataset is deliberately left open so they remain readable by
    the caller; nothing ever closes this file handle.
    """
    # load BASTA data from netcdf to arrays
    # path_input = '/home/luis/Data/BASTA/L0/'
    # filename_ = path_input + 'BASTA_L0_12m5_20180606_071716_20180806_025422.nc'
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    # open file
    netcdf_file_object = nc.Dataset(filename_, 'r')
    # load time as seconds and days
    # the time variable's units string carries the file's epoch; raw values are offsets
    file_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_file_object.variables['time'].units, time_format_basta)
    time_raw = netcdf_file_object.variables['time'][:].copy()
    time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
    time_days_epoc = time_seconds_to_days(time_seconds_epoc)
    # append range array
    array_range = netcdf_file_object.variables['range'][:].copy()
    # append raw_reflectivity array
    array_raw_reflectivity = netcdf_file_object.variables['raw_reflectivity']#[:].copy()
    # append raw_velocity array
    array_raw_velocity = netcdf_file_object.variables['raw_velocity']#[:].copy()
    # close file
    # netcdf_file_object.close()
    return array_raw_reflectivity, array_raw_velocity, array_range, time_seconds_epoc, time_days_epoc
def BASTA_load_period_to_dict(start_time_YMDHM, stop_time_YMDHM, folder_path,
                              variable_names=('time', 'range', 'raw_reflectivity', 'raw_velocity')):
    """
    Load the requested BASTA netcdf variables for one time period into a dict.

    :param start_time_YMDHM: period start as a 'YYYYmmddHHMM' string
    :param stop_time_YMDHM: period stop as a 'YYYYmmddHHMM' string
    :param folder_path: folder with compiled BASTA .nc files; each file name is
                        expected to end with ..._YYYYmmdd_HHMMSS.nc (its start time)
    :param variable_names: netcdf variable names to load; 'time' is always included
    :return: dict variable name -> array; variables with a time dimension are
             trimmed to the period, 'time' is converted to epoch seconds.
             Returns an empty dict when no file overlaps the period.
    """
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    out_dict = {}
    temp_dict = {}
    variables_with_time_dimension = []
    # 'time' is needed to trim the period, so force it to be loaded (first)
    if not 'time' in variable_names:
        variable_names_temp_list = ['time']
        for variable_name in variable_names:
            variable_names_temp_list.append(variable_name)
        variable_names = variable_names_temp_list
    # data_folder
    data_folder = folder_path
    # get all data files filenames
    file_list = sorted(glob.glob(str(data_folder + '\\*.nc')))
    file_times_tuple_list = []
    file_times_tuple_list_str = []
    # build (start, stop) coverage per file: a file is assumed to span until the
    # next file starts, or 24 h for the last file
    for i_, filename_ in enumerate(file_list):
        file_time_str_start = filename_.split('_')[-2] + filename_.split('_')[-1].split('.')[0]
        file_time_sec_start = time_str_to_seconds(file_time_str_start, '%Y%m%d%H%M%S')
        if i_ < len(file_list) -1:
            file_time_str_stop = file_list[i_+1].split('_')[-2] + file_list[i_+1].split('_')[-1].split('.')[0]
            file_time_sec_stop = time_str_to_seconds(file_time_str_stop, '%Y%m%d%H%M%S')
        else:
            file_time_sec_stop = file_time_sec_start + (24*60*60)
        file_times_tuple_list.append(tuple((file_time_sec_start, file_time_sec_stop)))
        file_times_tuple_list_str.append(tuple((file_time_str_start, time_seconds_to_str(file_time_sec_stop,
                                                                                         '%Y%m%d%H%M%S'))))
    # select only files inside time range
    event_start_sec = time_str_to_seconds(start_time_YMDHM, '%Y%m%d%H%M')
    event_stop_sec = time_str_to_seconds(stop_time_YMDHM, '%Y%m%d%H%M')
    selected_file_list = []
    # keep files that start or stop inside the period, or that bracket it
    for file_index in range(len(file_list)):
        if event_start_sec <= file_times_tuple_list[file_index][0] <= event_stop_sec:
            selected_file_list.append(file_list[file_index])
        elif event_start_sec <= file_times_tuple_list[file_index][1] <= event_stop_sec:
            selected_file_list.append(file_list[file_index])
        elif file_times_tuple_list[file_index][0] <= event_start_sec <= file_times_tuple_list[file_index][1]:
            selected_file_list.append(file_list[file_index])
        elif file_times_tuple_list[file_index][0] <= event_stop_sec <= file_times_tuple_list[file_index][1]:
            selected_file_list.append(file_list[file_index])
    print('found files:')
    p_(selected_file_list)
    # load data
    if len(selected_file_list) == 0:
        print('No files inside time range!')
        return out_dict
    else:
        cnt = 0
        for filename_ in selected_file_list:
            if cnt == 0:
                # first file initializes the accumulation arrays
                nc_file = nc.Dataset(filename_, 'r')
                print('reading file:',filename_)
                for variable_name in variable_names:
                    if 'time' in nc_file.variables[variable_name].dimensions:
                        variables_with_time_dimension.append(variable_name)
                    if variable_name == 'time':
                        # units string carries the file's epoch; raw values are offsets
                        file_time_stamp_seconds_epoc = time_str_to_seconds(nc_file.variables['time'].units,
                                                                           time_format_basta)
                        time_raw = nc_file.variables['time'][:].copy()
                        time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
                        temp_dict[variable_name] = time_seconds_epoc
                    else:
                        temp_dict[variable_name] = nc_file.variables[variable_name][:].filled(np.nan)
                nc_file.close()
                cnt += 1
            else:
                # later files are appended: 1D time-variables via hstack, 2D via vstack
                nc_file = nc.Dataset(filename_, 'r')
                print('reading file:', filename_)
                for variable_name in variable_names:
                    if 'time' in nc_file.variables[variable_name].dimensions:
                        variables_with_time_dimension.append(variable_name)
                        if len(nc_file.variables[variable_name].shape) == 1:
                            if variable_name == 'time':
                                file_time_stamp_seconds_epoc = time_str_to_seconds(nc_file.variables['time'].units,
                                                                                   time_format_basta)
                                time_raw = nc_file.variables['time'][:].copy()
                                time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
                                temp_dict[variable_name] = np.hstack((temp_dict[variable_name], time_seconds_epoc))
                            else:
                                temp_dict[variable_name] = np.hstack((temp_dict[variable_name],
                                                                      nc_file.variables[variable_name][:].filled(np.nan)))
                        else:
                            temp_dict[variable_name] = np.vstack((temp_dict[variable_name],
                                                                  nc_file.variables[variable_name][:].filled(np.nan)))
                nc_file.close()
    # find row for start and end of event
    start_row = np.argmin(np.abs(temp_dict['time'] - event_start_sec))
    end_row = np.argmin(np.abs(temp_dict['time'] - event_stop_sec))
    # only time-dependent variables are trimmed; the rest are passed through
    for variable_name in variable_names:
        if variable_name in variables_with_time_dimension:
            out_dict[variable_name] = temp_dict[variable_name][start_row:end_row]
        else:
            out_dict[variable_name] = temp_dict[variable_name]
    return out_dict
def MRR_CFAD(range_array, Ze_array, bins_=(12, np.arange(-10, 40, 2)), normalize_height_wise = True, x_header='dBZe',
             y_header='Height [km]', custom_y_range_tuple=None, custom_x_range_tuple=None, figure_filename=None,
             cbar_label='', cmap_=default_cm, figsize_ = (10,6), title_str = '', contourF_=True, cbar_format='%.2f',
             vmin_=None,vmax_=None, grid_=True, fig_ax=None, show_cbar=True, level_threshold_perc=10,
             invert_y=False, levels=None,custom_ticks_x=None, custom_ticks_y=None, cbar_ax=None):
    """
    Build (and plot) a Contoured Frequency by Altitude Diagram (CFAD) from
    reflectivity profiles.

    :param range_array: 1D or 2D range gates [m]; a 1D array is broadcast
                        row-wise to the shape of Ze_array
    :param Ze_array: 2D (time, range) reflectivity [dBZe]; NaNs are ignored
    :param bins_: (height bins, dBZe bin edges) forwarded to np.histogram2d
    :param normalize_height_wise: if True each height row is divided by its own
                                  sample count; rows with fewer samples than
                                  level_threshold_perc % of the best-sampled
                                  row are set to NaN
    :return: (fig_ax, histogram transposed, dBZe bin centers[:-1], height bin centers[:-1])
    All remaining keyword arguments are forwarded to the plotting helper p_arr_vectorized_3.
    """
    # broadcast a 1D range array to match Ze_array row-wise
    if len(range_array.shape) == 1:
        temp_array = np.zeros((Ze_array.shape))
        for r_ in range(Ze_array.shape[0]):
            temp_array[r_,:] = range_array
        range_array = temp_array
    if type(bins_[0]) == int:
        if bins_[0] < 1:
            bins_ = (int(range_array.shape[1] * bins_[0]), bins_[1])
    # hoist the NaN mask (it was computed twice) and drop the `normed` keyword,
    # which np.histogram2d removed in NumPy 1.24 (`normed=False` was the
    # default behavior anyway, so results are unchanged)
    valid_ = ~np.isnan(Ze_array.flatten())
    hist_out = np.histogram2d(range_array.flatten()[valid_] / 1000,
                              Ze_array.flatten()[valid_],
                              bins=bins_)
    hist_array, hist_r, hist_c = hist_out
    # bin centers from bin edges
    hist_r = (hist_r[:-1] + hist_r[1:]) * 0.5
    hist_c = (hist_c[:-1] + hist_c[1:]) * 0.5
    hist_r_2d = np.zeros((hist_array.shape), dtype=float)
    hist_c_2d = np.zeros((hist_array.shape), dtype=float)
    for r_ in range(hist_array.shape[0]):
        for c_ in range(hist_array.shape[1]):
            hist_r_2d[r_, c_] = hist_r[r_]
            hist_c_2d[r_, c_] = hist_c[c_]
    # normalize height wise
    if normalize_height_wise:
        heights_counts = np.sum(hist_array, axis=1)
        maximum_count_at_some_height = np.max(heights_counts)
        cbar_label_final = 'Height normalized frequency'
        for r_ in range(hist_array.shape[0]):
            # under-sampled heights are blanked rather than normalized
            if heights_counts[r_] < maximum_count_at_some_height * (level_threshold_perc/100):
                hist_array[r_, :] = np.nan
            else:
                hist_array[r_, :] = hist_array[r_, :] / heights_counts[r_]
    else:
        cbar_label_final = 'Normalized frequency'
    if cbar_label == '': cbar_label = cbar_label_final
    fig_ax = p_arr_vectorized_3(hist_array, hist_c_2d, hist_r_2d, contourF_=contourF_, grid_=grid_,
                                custom_y_range_tuple=custom_y_range_tuple, custom_x_range_tuple=custom_x_range_tuple,
                                x_header=x_header, y_header=y_header, cmap_=cmap_, figsize_=figsize_, cbar_ax=cbar_ax,
                                cbar_label=cbar_label, title_str=title_str, vmin_=vmin_, vmax_=vmax_,levels=levels,
                                figure_filename=figure_filename, fig_ax=fig_ax,show_cbar=show_cbar, invert_y=invert_y,
                                custom_ticks_x=custom_ticks_x, custom_ticks_y=custom_ticks_y,cbar_format=cbar_format)
    return fig_ax, hist_array.T, hist_c[:-1], hist_r[:-1]
# parsivel
def create_DSD_plot(DSD_arr, time_parsivel_seconds, size_arr, events_period_str, figfilename='',
                    output_data=False, x_range=(0, 7.5), y_range=(-1, 3.1), figsize_=(5, 5)):
    """
    Plot the mean log10 drop size distribution for one event period.

    :param DSD_arr: drop size distribution array (time rows, diameter columns)
    :param time_parsivel_seconds: epoch-seconds time series matching DSD_arr rows
    :param size_arr: diameter array; row 0 holds the diameter bin centers [mm]
    :param events_period_str: 'start_stop' period, each side formatted for time_to_row_str
    :param figfilename: if non-empty, save the figure to this path and close it
    :param output_data: if True return (diameter series, mean log10 N(D))
    :param x_range: x axis limits [mm]
    :param y_range: y axis limits [log10 N(D)]
    :param figsize_: matplotlib figure size
    """
    size_series = size_arr[0, :]
    start_str = events_period_str.split('_')[0]
    stop_str = events_period_str.split('_')[1]
    row_start = time_to_row_str(time_parsivel_seconds, start_str)
    row_stop = time_to_row_str(time_parsivel_seconds, stop_str)
    # normalize: divide counts by bin diameter, sum over axis 1, log10 with zeros masked
    concentration_over_D = DSD_arr / size_arr
    concentration_by_D = np.sum(concentration_over_D, axis=1)
    masked_concentration = concentration_by_D * 1
    masked_concentration[masked_concentration == 0] = np.nan
    log_concentration = np.log10(masked_concentration)
    event_slice = np.array(log_concentration[row_start:row_stop])
    # count valid (non-nan) samples per diameter bin over the event
    valid_flags = np.array(event_slice)
    valid_flags[~np.isnan(valid_flags)] = 1
    valid_counts = np.nansum(valid_flags, axis=0)
    mean_by_D = np.nanmean(event_slice, axis=0)
    # bins with fewer than 10 valid samples are not representative
    mean_by_D[valid_counts < 10] = np.nan
    fig, ax = plt.subplots(figsize=figsize_)
    ax.set_title('Mean value of drop concentrations in each diameter bin')
    ax.set_xlabel('D [mm]')
    ax.set_ylabel('log10 N(D) [m-3 mm-1]')
    ax.plot(size_series, mean_by_D, '-or', label='Event 1')
    ax.set_xlim(x_range)
    ax.set_ylim(y_range)
    ax.grid()
    if figfilename != '':
        fig.savefig(figfilename, transparent=True, bbox_inches='tight')
        plt.close(fig)
    if output_data:
        return size_series, mean_by_D
def parsivel_nc_format_V2(input_filename, output_filename):
"""
Transform the not so good nc V1 version produced by save_parsivel_arrays_to_netcdf to V2
:param input_filename: output from save_parsivel_arrays_to_netcdf
:param output_filename: a path and filename
:return:
"""
# create file
netcdf_output_file_object = nc.Dataset(output_filename, 'w')
print('created new file')
netcdf_first_file_object = nc.Dataset(input_filename)
# create attributes
netcdf_output_file_object.setncattr('author', '<NAME> (<EMAIL>')
netcdf_output_file_object.setncattr('version', 'V2')
netcdf_output_file_object.setncattr('created', time_seconds_to_str(time.time(), '%Y-%m-%d_%H:%M UTC'))
print('added attributes')
# create list for dimensions and variables
dimension_names_list = sorted(netcdf_first_file_object.dimensions)
variable_names_list = sorted(netcdf_first_file_object.variables)
# create dimensions
for dim_name in dimension_names_list:
if dim_name == 'time':
netcdf_output_file_object.createDimension('time', size=0)
print('time', 'dimension created')
else:
netcdf_output_file_object.createDimension(dim_name,
size=netcdf_first_file_object.dimensions[dim_name].size)
print(dim_name, 'dimension created')
# create variables
# time
var_name = 'time'
netcdf_output_file_object.createVariable(var_name, 'int64', (var_name,), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units',
'seconds since ' + time_seconds_to_str(0, time_format_mod))
time_parsivel_seconds = time_str_to_seconds(np.array(netcdf_first_file_object.variables[var_name][:], dtype=str),
time_format_parsivel)
netcdf_output_file_object.variables[var_name][:] = np.array(time_parsivel_seconds, dtype='int64')
print('created time variable')
# time_YmdHM
var_name = 'YYYYmmddHHMM'
netcdf_output_file_object.createVariable(var_name, 'str', ('time',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'YYYYmmddHHMM in string type')
netcdf_output_file_object.variables[var_name][:] = np.array(netcdf_first_file_object.variables['time'][:],
dtype=str)
print('created time_YmdHM variable')
# particle_fall_speed
var_name = 'particles_spectrum'
if var_name in variable_names_list:
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
netcdf_first_file_object.variables[var_name].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'particle counts per bin per minute')
netcdf_output_file_object.variables[var_name].setncattr('description',
'for each time stamp, the array varies with respect'
' to fall speed on the y axis (rows) starting from the top'
' and varies with respect to size on the x axis (columns) '
'starting from the left')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:].copy()
print('created particles_spectrum variable')
# particle_fall_speed
var_name = 'particle_fall_speed'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
('particle_fall_speed',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'm/s')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:, 0].copy()
print('created particle_fall_speed variable')
# particle_size
var_name = 'particle_size'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name].dtype,
('particle_size',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'mm')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][0, :].copy()
print('created particle_size variable')
# precipitation_intensity
var_name = 'precipitation_intensity'
netcdf_output_file_object.createVariable(var_name,
'float',
netcdf_first_file_object.variables[
'Intensity of precipitation (mm|h)'].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'mm/h')
netcdf_output_file_object.variables[var_name][:] = np.array(
netcdf_first_file_object.variables['Intensity of precipitation (mm|h)'][:], dtype=float)
print('created precipitation_intensity variable')
# Weather_code_SYNOP_WaWa
var_name = 'weather_code_SYNOP_WaWa'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables['Weather code SYNOP WaWa'].dtype,
netcdf_first_file_object.variables['Weather code SYNOP WaWa'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
netcdf_output_file_object.variables[var_name][:] = \
netcdf_first_file_object.variables['Weather code SYNOP WaWa'][:].copy()
# Weather_code_SYNOP_WaWa
var_name = 'weather_code_METAR_SPECI'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables['Weather code METAR|SPECI'].dtype,
netcdf_first_file_object.variables['Weather code METAR|SPECI'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
netcdf_output_file_object.variables[var_name][:] = \
netcdf_first_file_object.variables['Weather code METAR|SPECI'][:].copy()
print('created weather_code_METAR_SPECI variable')
# Weather_code_NWS
var_name = 'weather_code_NWS'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables['Weather code NWS'].dtype,
netcdf_first_file_object.variables['Weather code NWS'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
NWS_description = '''precip_type_dict = {
'C': 'No Precip',
'Kein Niederschlag': 'No Precip',
'A': 'Hail',
'L': 'Drizzle',
'L+': 'heavy Drizzle',
'L-': 'light Drizzle',
'R': 'Rain',
'R+': 'heavy Rain',
'R-': 'light Rain',
'RL': 'Drizzle and Rain',
'RL+': 'heavy Drizzle and Rain',
'RL-': 'light Drizzle and Rain',
'RLS': 'Rain, Drizzle and Snow',
'RLS+': 'heavy Rain, Drizzle and Snow',
'RLS-': 'light Rain, Drizzle and Snow',
'S': 'Snow',
'S+': 'heavy Snow',
'S-': 'light Snow',
'SG': 'Snow Grains',
'SP': 'Freezing Rain'
}'''
netcdf_output_file_object.variables[var_name].setncattr('description', NWS_description)
netcdf_output_file_object.variables[var_name][:] = \
netcdf_first_file_object.variables['Weather code NWS'][:].copy()
print('created weather_code_NWS variable')
# Radar_reflectivity (dBz)
var_name = 'radar_reflectivity'
netcdf_output_file_object.createVariable(var_name,
'float',
netcdf_first_file_object.variables['Radar reflectivity (dBz)'].dimensions,
zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'dBz')
netcdf_output_file_object.variables[var_name][:] = np.array(
netcdf_first_file_object.variables['Radar reflectivity (dBz)'][:], dtype=float)
print('created radar_reflectivity variable')
# particle_count
var_name = 'particle_count'
netcdf_output_file_object.createVariable(var_name,
'int64',
netcdf_first_file_object.variables[
'Number of detected particles'].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', 'counts')
netcdf_output_file_object.variables[var_name].setncattr('description', 'Number of detected particles per minute')
netcdf_output_file_object.variables[var_name][:] = np.array(
netcdf_first_file_object.variables['Number of detected particles'][:], dtype='int64')
print('created particle_count variable')
# particle_concentration_spectrum
var_name = 'particle_concentration_spectrum'
var_name_old = 'particle_concentration_spectrum_m-3'
if var_name_old in variable_names_list:
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name_old].dtype,
netcdf_first_file_object.variables[var_name_old].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
netcdf_output_file_object.variables[var_name].setncattr('description', 'particles per meter cube per class')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name_old][:].copy()
print('created particle_concentration_spectrum variable')
# N_total
var_name = 'N_total'
var_name_old = 'particle_concentration_total_m-3'
netcdf_output_file_object.createVariable(var_name,
netcdf_first_file_object.variables[var_name_old].dtype,
netcdf_first_file_object.variables[var_name_old].dimensions, zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
netcdf_output_file_object.variables[var_name].setncattr('description', 'total particles per meter cube')
netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name_old][:].copy()
print('created N_total variable')
# psd
var_name = 'psd'
var_name_old = 'particle_concentration_spectrum_m-3'
netcdf_output_file_object.createVariable(var_name,
'float',
('time', 'particle_size',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
netcdf_output_file_object.variables[var_name].setncattr('description', 'particle size distribution, same as '
'particle_concentration_spectrum but all speeds'
'bins are summed, only varies with time and size')
netcdf_output_file_object.variables[var_name][:] = np.sum(netcdf_first_file_object.variables[var_name_old][:],
axis=1)
print('created psd variable')
# rain mask
rain_only_list = ['R', 'R+', 'R-']
RR_ = np.array(netcdf_first_file_object.variables['Intensity of precipitation (mm|h)'][:], dtype=float)
NWS_ = netcdf_first_file_object.variables['Weather code NWS'][:].copy()
rain_mask = np.zeros(RR_.shape[0], dtype=int) + 1
for r_ in range(RR_.shape[0]):
if RR_[r_] > 0 and NWS_[r_] in rain_only_list:
rain_mask[r_] = 0
var_name = 'rain_mask'
netcdf_output_file_object.createVariable(var_name,
'int',
('time',), zlib=True)
netcdf_output_file_object.variables[var_name].setncattr('units', '0 if rain, 1 if not rain')
netcdf_output_file_object.variables[var_name].setncattr('description', 'using the NWS code, only used R, R+ and R-')
netcdf_output_file_object.variables[var_name][:] = rain_mask
print('rain_mask')
# close all files
netcdf_output_file_object.close()
netcdf_first_file_object.close()
def parsivel_sampling_volume(particle_size_2d, particle_fall_speed_2d):
    """Return the Parsivel effective sampling volume (m3) per size/speed class.

    The laser sheet is 0.18 m x 0.03 m; the effective beam width shrinks by
    half a particle diameter (sizes are in mm, hence the /1000 conversion).
    The sampled height is the distance a particle falls in one minute.

    :param particle_size_2d: particle diameters [mm], scalar or array
    :param particle_fall_speed_2d: fall speeds [m/s], scalar or array
    :return: sampling volume [m3], same shape as the inputs
    """
    effective_width_m = 0.03 - ((particle_size_2d / 1000) / 2)
    area_m2 = 0.18 * effective_width_m
    sampling_time_s = 60
    height_m = particle_fall_speed_2d * sampling_time_s
    return area_m2 * height_m
def load_parsivel_txt_to_array(filename_, delimiter_=';'):
    """Parse a raw Parsivel disdrometer ASCII export into numpy arrays.

    Each data row carries 16 telemetry fields, optionally followed by a
    flattened 32x32 particle spectrum (speed class x size class counts).

    :param filename_: full path of the Parsivel text file
    :param delimiter_: field separator used in the file (default ';')
    :return: (data_array, spectrum_array, t_list, size_array, speed_array,
        header_); spectrum_array is None when the header shows that no
        spectra were recorded (16-column header)
    """
    # filename_ = 'C:\\_input\\parsivel_2018-07-26-00_2018-08-02-00_1.txt'
    # Parsivel class-center scales: particle size in mm, fall speed in m/s
    size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
                  2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
    speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
                   3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
    # 2-D grids: speed varies along rows, size along columns
    speed_array = np.zeros((32,32), dtype=float)
    size_array = np.zeros((32, 32), dtype=float)
    for i in range(32):
        speed_array[:,i] = speed_scale
        size_array[i, :] = size_scale
    # read parsivel file
    spectrum_array_list = []
    data_array_list = []
    with open(filename_) as file_object:
        header_ = file_object.readline().split(delimiter_)
        # the first data line is handled separately from the loop below
        # NOTE(review): unlike the loop, this first line is not checked for a
        # repeated 'Date' header row -- confirm the export never starts with one
        line_str = file_object.readline()
        line_split = np.array(line_str.split(delimiter_))
        if len(line_split) == 17:
            # 16 data fields plus an empty spectrum column: store a zero spectrum
            line_split[16] = '0'
            data_array_list.append(line_split[:-1])
            spectrum_array_list.append(np.zeros((32,32)))
        elif len(line_split) > 17:
            # 16 data fields followed by the flattened 32x32 spectrum
            line_split[16] = '0'
            data_array_list.append(line_split[:16])
            line_split[line_split == ''] = '0'  # empty spectrum cells mean zero counts
            spectrum_array_list.append(np.array(line_split[16:-1]).reshape((32, 32)))
        elif len(line_split) == 16:
            # no spectrum recorded for this row
            data_array_list.append(line_split[:-1])
            spectrum_array_list.append(np.zeros((32,32)))
        # remaining rows: same three cases as above
        for line in file_object:
            line_split = np.array(line.split(delimiter_))
            if len(line_split) == 17:
                line_split[16] = '0'
                data_array_list.append(line_split[:-1])
                spectrum_array_list.append(np.zeros((32, 32)))
            elif len(line_split) > 17:
                line_split[16] = '0'
                data_array_list.append(line_split[:16])
                line_split[line_split == ''] = '0'
                spectrum_array_list.append(np.array(line_split[16:-1]).reshape((32, 32)))
            elif len(line_split) == 16:
                # skip repeated header rows embedded mid-file
                if line_split[0] != 'Date':
                    data_array_list.append(line_split[:-1])
                    spectrum_array_list.append(np.zeros((32, 32)))
    data_array = np.stack(data_array_list)
    spectrum_array = np.stack(spectrum_array_list).astype(float)
    # build 'date time' strings from the first two columns
    t_list = []
    for t_ in range(data_array.shape[0]):
        t_list.append(data_array[t_][0] + ' ' + data_array[t_][1])
    if len(header_) == 16:
        # no spectra was set to record
        return data_array, None, t_list, size_array, speed_array, header_
    else:
        return data_array, spectrum_array, t_list, size_array, speed_array, header_
def save_parsivel_arrays_to_netcdf(raw_spectra_filename, nedcdf_output_filename,
                                   delimiter_=';', raw_time_format='%d.%m.%Y %H:%M:%S'):
    """Convert a raw Parsivel ASCII export into a netCDF4 file.

    Reads the text file with load_parsivel_txt_to_array, then writes time,
    particle_size, particle_fall_speed, the raw counts spectrum and the
    derived particle concentration (m-3) variables, plus every telemetry
    column as a string variable.

    :param raw_spectra_filename: path of the Parsivel text file
    :param nedcdf_output_filename: path of the netCDF file to be created
    :param delimiter_: field separator used in the text file
    :param raw_time_format: strptime format of the text file's time stamps
    :return: None
    """
    # save_parsivel_arrays_to_netcdf('C:\\_input\\parsivel_2018-07-26-00_2018-08-02-00_1.txt', 'C:\\_input\\parsivel_compiled_3.nc')
    print('reading txt to array')
    data_array, spectrum_array, t_list, size_array, speed_array, header_ = \
        load_parsivel_txt_to_array(raw_spectra_filename, delimiter_=delimiter_)
    print('arrays created')
    file_attributes_tuple_list = [('Compiled by', '<NAME> @: ' + str(datetime.datetime.now())),
                                  ('Data source', 'Parsivel Disdrometer'),
                                  ('time format', 'YYYYMMDDHHmm in uint64 data type, each ' +
                                                  'time stamp is the acumulated precip for one minute')]
    # time from str to int
    # note: dtype '<U12' holds strings; the int() results are implicitly stored
    # as their string representation and written into a 'u8' variable below
    time_array = np.zeros(data_array.shape[0], dtype='<U12')
    # for t_ in range(data_array.shape[0]):
    #     time_array[t_] = int(t_list[t_][6:10] +  # YYYY
    #                          t_list[t_][3:5] +  # MM
    #                          t_list[t_][:2] +  # DD
    #                          t_list[t_][12:14] +  # HH
    #                          t_list[t_][15:17])  # mm
    for t_ in range(data_array.shape[0]):
        time_array[t_] = int(time_seconds_to_str(time_str_to_seconds(t_list[t_],raw_time_format),
                                                 time_format_parsivel))
    pollutant_attributes_tuple_list = [('units', 'particles per minute')]
    # create output file
    file_object_nc4 = nc.Dataset(nedcdf_output_filename,'w')#,format='NETCDF4_CLASSIC')
    print('output file started')
    # create dimensions
    file_object_nc4.createDimension('particle_fall_speed', speed_array.shape[0])
    file_object_nc4.createDimension('particle_size', size_array.shape[1])
    file_object_nc4.createDimension('time', time_array.shape[0])
    # create dimension variables (speed/size are stored as full 32x32 grids)
    file_object_nc4.createVariable('particle_fall_speed', 'f4', ('particle_fall_speed','particle_size',), zlib=True)
    file_object_nc4.createVariable('particle_size', 'f4', ('particle_fall_speed','particle_size',), zlib=True)
    file_object_nc4.createVariable('time', 'u8', ('time',), zlib=True)
    # populate dimension variables
    file_object_nc4.variables['time'][:] = time_array[:]
    file_object_nc4.variables['particle_fall_speed'][:] = speed_array[:]
    file_object_nc4.variables['particle_size'][:] = size_array[:]
    # create particles_spectrum array (only when spectra were recorded)
    if spectrum_array is not None:
        # raw counts stored as unsigned 16-bit integers
        file_object_nc4.createVariable('particles_spectrum', 'u2',
                                       ('time', 'particle_fall_speed', 'particle_size',), zlib=True)
        # populate
        file_object_nc4.variables['particles_spectrum'][:] = spectrum_array[:]
        # create particle_concentration_spectrum_m-3
        # get sampling volume
        sampling_volume_2d = parsivel_sampling_volume(size_array, speed_array)
        particle_concentration_spectrum = spectrum_array / sampling_volume_2d
        # create variable
        file_object_nc4.createVariable('particle_concentration_spectrum_m-3', 'float32',
                                       ('time', 'particle_fall_speed', 'particle_size',), zlib=True)
        # populate
        file_object_nc4.variables['particle_concentration_spectrum_m-3'][:] = particle_concentration_spectrum[:]
        # create particle_concentration_total_m-3 (sum over both class axes)
        particle_concentration_total = np.nansum(np.nansum(particle_concentration_spectrum, axis=-1), axis=-1)
        # create variable
        file_object_nc4.createVariable('particle_concentration_total_m-3', 'float32',
                                       ('time', ), zlib=True)
        # populate
        file_object_nc4.variables['particle_concentration_total_m-3'][:] = particle_concentration_total[:]
        for attribute_ in pollutant_attributes_tuple_list:
            setattr(file_object_nc4.variables['particles_spectrum'], attribute_[0], attribute_[1])
    # create other data variables (one string variable per telemetry column)
    for i_, head_ in enumerate(header_[:-1]):
        var_name = head_.replace('/','|')  # '/' is not allowed in netCDF variable names
        print('storing var name: ' , var_name)
        temp_ref = file_object_nc4.createVariable(var_name, str, ('time',), zlib=True)
        temp_ref[:] = data_array[:, i_]
    for attribute_ in file_attributes_tuple_list:
        setattr(file_object_nc4, attribute_[0], attribute_[1])
    file_object_nc4.close()
    print('Done!')
def load_parsivel_from_nc(netcdf_filename):
    """Read every variable of a Parsivel netCDF file into memory.

    :param netcdf_filename: path of the netCDF file
    :return: (values_dict, variable_names) where values_dict maps each
        variable name to an in-memory copy of its data
    """
    dataset_ = nc.Dataset(netcdf_filename, 'r')
    variable_name_list = dataset_.variables.keys()
    file_var_values_dict = {var_: dataset_.variables[var_][:].copy()
                            for var_ in variable_name_list}
    dataset_.close()
    return file_var_values_dict, variable_name_list
def parsivel_plot_spectrum_counts(arr_, title_='', x_range_tuple=(0, 6), y_range_tuple=(0, 10), save_filename=None,
                                  contourF=False, bins_=(0,2,5,10,20,50,100,200), fig_size=(5,5)):
    """Plot a 32x32 Parsivel particle-count spectrum with a discrete color scale.

    :param arr_: 2-D array of counts per (speed, size) class
    :param title_: axes title
    :param x_range_tuple: particle size axis limits [mm]
    :param y_range_tuple: particle speed axis limits [m/s]
    :param save_filename: if None the figure is shown, otherwise saved to this path
    :param contourF: use filled contours instead of pcolormesh
    :param bins_: ascending count thresholds that define the color classes
    :param fig_size: figure size in inches
    :return: (fig, ax)
    """
    # one color per count class (white is for counts at/below the lowest bin)
    cmap_parsivel = ListedColormap(['white', 'yellow', 'orange', 'lime', 'darkgreen',
                                    'aqua', 'purple', 'navy', 'red'], 'indexed')
    # Parsivel class-center scales: particle size in mm, fall speed in m/s
    size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
                  2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
    speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
                   3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
    # 2-D grids: speed varies along rows, size along columns
    speed_array = np.zeros((32,32), dtype=float)
    size_array = np.zeros((32, 32), dtype=float)
    for i in range(32):
        speed_array[:,i] = speed_scale
        size_array[i, :] = size_scale
    # classify each cell: value i_+1 means the count exceeds bins_[i_]
    spectrum_array_color = np.zeros((arr_.shape[0], arr_.shape[1]), dtype=float)
    bin_labels = []
    i_ = 0
    for i_, bin_ in enumerate(bins_):
        spectrum_array_color[arr_ > bin_] = i_ + 1
        bin_labels.append(str(bin_))
    bin_labels[i_] = '>' + bin_labels[i_]  # last class is open-ended
    fig, ax = plt.subplots(figsize=fig_size)
    if contourF:
        quad1 = ax.contourf(size_array, speed_array, spectrum_array_color, cmap=cmap_parsivel,
                            vmin=0, vmax=8)
    else:
        quad1 = ax.pcolormesh(size_array, speed_array, spectrum_array_color, cmap=cmap_parsivel,
                              vmin=0, vmax=8)
    ax.set_ylim(y_range_tuple)
    ax.set_xlim(x_range_tuple)
    ax.set_xlabel('particle size [mm]')
    ax.set_ylabel('particle speed [m/s]')
    ax.set_title(title_)
    cbar_label = 'Particles per bin'
    cb2 = fig.colorbar(quad1)#, ticks=[0,1,2,3,4,5,6,7])
    # place one tick at the center of each color class (i_ is the last bin index)
    ticks_ = np.linspace(0.5, i_+0.5, len(bins_))
    cb2.set_ticks(ticks_)
    cb2.set_ticklabels(bin_labels)
    cb2.ax.set_ylabel(cbar_label)
    if save_filename is None:
        plt.show()
    else:
        fig.savefig(save_filename, transparent=True, bbox_inches='tight')
        plt.close(fig)
    return fig, ax
def parsivel_plot_spectrum_DSD(arr_, title_='', x_range_tuple=(0, 6), y_range_tuple=(0, 10), save_filename=None,
                               contourF=False, fig_size=(5,5), cmap_=default_cm, cbar_label='DSD [m-3]',
                               nozeros_=True, vmin_=None, vmax_=None,):
    """Plot a drop-size-distribution spectrum on the Parsivel size/speed grid.

    :param arr_: 2-D (speed, size) DSD array
    :param title_: axes title
    :param x_range_tuple: particle size axis limits [mm]
    :param y_range_tuple: particle speed axis limits [m/s]
    :param save_filename: if None the figure is shown, otherwise saved to this path
    :param contourF: use filled contours instead of pcolormesh
    :param fig_size: figure size in inches
    :param cmap_: colormap used for the spectrum
    :param cbar_label: colorbar label text
    :param nozeros_: when True, zero cells are masked (plotted blank)
    :param vmin_: lower color limit (pcolormesh only)
    :param vmax_: upper color limit (pcolormesh only)
    :return: (fig, ax)
    """
    # Parsivel class-center scales: particle size in mm, fall speed in m/s
    size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
                  2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
    speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
                   3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
    # 2-D grids: speed varies along rows, size along columns
    speed_array = np.repeat(np.array(speed_scale, dtype=float)[:, np.newaxis], 32, axis=1)
    size_array = np.tile(np.array(size_scale, dtype=float), (32, 1))
    if nozeros_:
        # work on a copy so the caller's array is untouched
        plotted_ = np.array(arr_)
        plotted_[plotted_ == 0] = np.nan
    else:
        plotted_ = arr_
    fig, ax = plt.subplots(figsize=fig_size)
    if contourF:
        quad1 = ax.contourf(size_array, speed_array, plotted_, cmap=cmap_)
    else:
        quad1 = ax.pcolormesh(size_array, speed_array, plotted_, cmap=cmap_, vmin=vmin_, vmax=vmax_)
    ax.set_ylim(y_range_tuple)
    ax.set_xlim(x_range_tuple)
    ax.set_xlabel('particle size [mm]')
    ax.set_ylabel('particle speed [m/s]')
    ax.set_title(title_)
    cb2 = fig.colorbar(quad1)
    cb2.ax.set_ylabel(cbar_label)
    if save_filename is None:
        plt.show()
    else:
        fig.savefig(save_filename, transparent=True, bbox_inches='tight')
        plt.close(fig)
    return fig, ax
def calculate_cumulative_precipitation_parsivel(parsivel_precipitation_mm_per_hour, parsivel_time_sec, time_period_str):
    """Accumulated precipitation (mm) between the two timestamps of time_period_str.

    :param parsivel_precipitation_mm_per_hour: intensity series [mm/h] at 1-minute cadence
    :param parsivel_time_sec: matching time axis in seconds
    :param time_period_str: 'start_stop' pair of timestamp strings joined by '_'
    :return: precipitation total in mm (intensities divided by 60 per minute sample)
    """
    start_str = time_period_str.split('_')[0]
    stop_str = time_period_str.split('_')[1]
    row_start = time_to_row_str(parsivel_time_sec, start_str)
    row_stop = time_to_row_str(parsivel_time_sec, stop_str)
    return np.nansum(parsivel_precipitation_mm_per_hour[row_start:row_stop]) / 60
def calculate_D_m(N_D, D_series):
    """Mass-weighted mean diameter: ratio of the 4th to the 3rd moment of N(D).

    :param N_D: drop concentration per diameter class
    :param D_series: diameter class centers (bin widths taken from np.gradient)
    :return: D_m in the same units as D_series
    """
    dD = np.gradient(D_series)
    fourth_moment = np.nansum(N_D * D_series ** 4 * dD)
    third_moment = np.nansum(N_D * D_series ** 3 * dD)
    return fourth_moment / third_moment
def calculate_LWC(N_D, D_series):
    """Liquid water content from a drop size distribution.

    LWC = (pi * rho_w / 6) * sum(N(D) * D^3 * dD), with water density in g/m3.

    :param N_D: drop concentration per diameter class
    :param D_series: diameter class centers (bin widths taken from np.gradient)
    :return: liquid water content
    """
    dD = np.gradient(D_series)
    water_density = 1E6  # g/m3
    third_moment = np.nansum(N_D * D_series ** 3 * dD)
    return (np.pi * water_density / 6) * third_moment
# Holographic microscope
def convert_raw_to_array(filename_):
    """Read a raw holographic-microscope frame into a 2592x1944 uint16 array.

    The file stores 16-bit little-endian pixels: the low byte comes first
    (even offsets), the high byte second (odd offsets).

    :param filename_: path of the raw frame file (must hold exactly
        2592 * 1944 pixels, i.e. ~10 MB)
    :return: (1944, 2592) uint16 image array
    """
    print('converting file: ' + filename_.split('/')[-1])
    A = np.fromfile(filename_, dtype='uint8')
    evenEl = A[1::2]  # high bytes
    oddEl = A[0::2]   # low bytes
    # BUG FIX: promote to uint16 before scaling. '256 * uint8_array' relied on
    # NumPy 1.x value-based promotion; under NumPy 2 (NEP 50) the Python int
    # 256 is out of range for uint8 and the expression fails/overflows.
    B = 256 * evenEl.astype(np.uint16) + oddEl
    width = 2592
    height = 1944
    I = B.reshape(height, width)
    return I
def create_video_from_filelist(file_list, output_filename, cmap_):
    """Build an animation from a list of raw holographic-microscope frames.

    :param file_list: raw frame filenames; the 17 characters before the
        extension are used as per-frame titles (timestamps)
    :param output_filename: path of the video file to create
    :param cmap_: colormap passed to the animation helper
    :return: None
    """
    frame_width = 2592
    frame_height = 1944
    # NOTE(review): the stack is uint8 but convert_raw_to_array returns 16-bit
    # values, so the high byte is truncated on assignment -- confirm intended
    frame_stack = np.zeros((len(file_list), frame_height, frame_width), dtype='uint8')
    title_stamps = []
    for frame_i, raw_name in enumerate(file_list):
        frame_stack[frame_i, :, :] = convert_raw_to_array(raw_name)
        title_stamps.append(raw_name[-21:-4])
    create_video_animation_from_3D_array(frame_stack, output_filename, colormap_= cmap_, title_list=title_stamps,
                                         axes_off=True, show_colorbar=False, interval_=500)
def convert_array_to_png_array(array_):
    """Encode a 2-D numeric array into a 3-channel uint8 image array.

    Packs each cell's sign, scaled mantissa, NaN flag and (signed) decimal
    exponent into three uint8 layers so the data can be stored as a PNG.
    NOTE(review): the matching decoder is not visible here -- confirm the
    packing constants (100 offsets, x10/x100 scaling) against it before
    changing anything.

    :param array_: 2-D numeric array. WARNING: NaN cells of the input are
        overwritten with 0 in place (the caller's array is mutated).
    :return: (rows, columns, 3) uint8 array
    """
    # shape
    rows_ = array_.shape[0]
    columns_ = array_.shape[1]
    # nan layer (x != x is True only for NaN); flagged cells get 100
    array_nan = np.zeros((rows_, columns_), dtype='uint8')
    array_nan[array_ != array_] = 100
    # replace nans (in place, mutates the input array)
    array_[array_ != array_] = 0
    # convert to all positive
    array_positive = np.abs(array_)
    # sign layer: 100 marks non-negative cells
    array_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_sign[array_ >= 0] = 100
    # zeros array: 1 where the magnitude is non-zero
    array_zeros = np.zeros((rows_, columns_), dtype='uint8')
    array_zeros[array_positive != 0] = 1
    # sub 1 array: 1 where 0 < |value| < 1 (mantissa needs the extra x9 scaling)
    array_sub1 = np.zeros((rows_, columns_), dtype='uint8')
    array_sub1[array_positive<1] = 1
    array_sub1 = array_sub1 * array_zeros
    # power array: integer decimal exponent of each magnitude
    exp_ = np.array(np.log10(array_positive), dtype=int)
    exp_[array_zeros==0] = 0  # zeros carry exponent 0
    # integral array: mantissa in [1, 10) (or (0, 1) for sub-1 values)
    array_integral = array_positive / 10 ** np.array(exp_, dtype=float)
    # array_layer_1: scaled mantissa integer part plus the sign offset
    array_layer_1 = np.array(((array_sub1 * 9) + 1) * array_integral * 10, dtype='uint8') + array_sign
    # array_layer_2: two decimal digits of the mantissa remainder plus the NaN flag
    array_layer_2 = np.array(((array_integral * ((array_sub1 * 9) + 1) * 10)
                              - np.array(array_integral * ((array_sub1 * 9) + 1) * 10, dtype='uint8')) * 100,
                             dtype='uint8')
    array_layer_2 = array_layer_2 + array_nan
    # power sign layer: 100 marks non-negative (adjusted) exponents
    exp_ = exp_ - array_sub1
    array_power_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_power_sign[exp_ >= 0] = 100
    # array_layer_3: absolute exponent plus its sign offset
    array_layer_3 = np.abs(exp_) + array_power_sign
    # initialize out array
    out_array = np.zeros((rows_, columns_, 3), dtype='uint8')
    # dump into out array
    out_array[:, :, 0] = array_layer_1
    out_array[:, :, 1] = array_layer_2
    out_array[:, :, 2] = array_layer_3
    return out_array
# netcdf file handling
def netCDF_crop_timewise(input_filename, time_stamp_start_str_YYYYmmDDHHMM, time_stamp_stop_str_YYYYmmDDHHMM,
                         output_filename=None, vars_to_keep=None, time_dimension_name='time'):
    """
    Creates a copy of an input netCDF4 file with only a subset of the data
    :param input_filename: netCDF4 file with path
    :param time_stamp_start_str_YYYYmmDDHHMM: start time stamp string in YYYYmmDDHHMM format
    :param time_stamp_stop_str_YYYYmmDDHHMM: stop time stamp string in YYYYmmDDHHMM format
    :param output_filename: filename with path and .nc extension. If none, output file will be in same folder as input
    :param vars_to_keep: list of variable names in str to be kept in output copy. If none, all variables will be copied
    :param time_dimension_name: name of time dimension
    :return: 0 if good, filename if error
    """
    error_file = 0
    try:
        # read only the time axis to translate the two time stamps into rows
        source_obj = nc.Dataset(input_filename)
        time_array = source_obj.variables[time_dimension_name][:].copy()
        source_obj.close()
        row_start = time_to_row_str(time_array, time_stamp_start_str_YYYYmmDDHHMM)
        row_stop = time_to_row_str(time_array, time_stamp_stop_str_YYYYmmDDHHMM)
        # load the trimmed subset and write it out
        subset_dict = load_netcdf_to_dictionary(input_filename, var_list=vars_to_keep,
                                                time_tuple_start_stop_row=(row_start, row_stop),
                                                time_dimension_name=time_dimension_name)
        if output_filename is None:
            output_filename = input_filename[:-3] + '_trimmed_' + str(row_start) + '_' + str(row_stop) + '.nc'
        save_dictionary_to_netcdf(subset_dict, output_filename)
    except BaseException as error_msg:
        # best-effort: report the failure and hand back the offending filename
        print(error_msg)
        error_file = input_filename
    return error_file
def add_variable_to_netcdf_file(nc_filename, variables_dict):
    """
    Opens and adds a variable(s) to the file. Will not add new dimensions.
    :param nc_filename: str including path
    :param variables_dict:
        must be a dictionary with keys as variables. inside each variables key should have a dictionary
        inside with variable names as keys
        Each var must have a data key equal to a numpy array (can be masked) and an attributes key
        Each var must have a dimensions key equal to a tuple, in the same order as the array's dimensions
        Each var must have an attributes key equal to a list of tuples with name and description text
    :return: None
    """
    # validate the dictionary format before touching the file
    vars_list = variables_dict.keys()
    for var_ in vars_list:
        if 'dimensions' not in variables_dict[var_].keys():
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its dimensions')
            return
        if 'attributes' not in variables_dict[var_].keys():
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its attributes')
            return
    # open file in append mode
    file_obj = nc.Dataset(nc_filename,'a')
    print('file openned, do not close this threat or file might be corrupted')
    try:
        # check that variable shapes agree with destination file
        for var_ in vars_list:
            dim_list = list(variables_dict[var_]['dimensions'])
            var_shape = variables_dict[var_]['data'].shape
            for i_, dim_ in enumerate(dim_list):
                if dim_ in sorted(file_obj.dimensions):
                    if var_shape[i_] == file_obj.dimensions[dim_].size:
                        pass
                    else:
                        print('Variable', var_, 'has dimension', dim_,
                              'of different size compared to destination file\nfile closed')
                        file_obj.close()
                        return
                else:
                    print('Variable', var_, 'has dimension', dim_,
                          'which does not exist in destination file\nfile closed')
                    file_obj.close()
                    return
            # create variables
            print('creating', var_, 'variable')
            file_obj.createVariable(var_,
                                    variables_dict[var_]['data'].dtype,
                                    variables_dict[var_]['dimensions'], zlib=True)
            # populate variables
            file_obj.variables[var_][:] = variables_dict[var_]['data']
            for var_attr in variables_dict[var_]['attributes']:
                # fill value can only be set at creation time, so it is skipped here
                if var_attr[0] == '_FillValue' or var_attr[0] == 'fill_value':
                    pass
                else:
                    setattr(file_obj.variables[var_], var_attr[0], var_attr[1])
            print('created', var_, 'variable')
    except BaseException as error_msg:
        file_obj.close()
        print('error, file closed\n', error_msg)
        # BUG FIX: return here; previously execution fell through to the
        # close() below and raised again on the already-closed dataset
        return
    print('All good, closing file')
    file_obj.close()
    print('Done!')
def save_dictionary_to_netcdf(dict_, output_filename):
    """
    Saves a dictionary with the right format to a netcdf file. First dim will be set to unlimited.
    :param dict_: must have a dimensions key, a variables key, and a attributes key.
    dimensions key should have a list of the names of the dimensions
    variables key should have a dictionary inside with variable names as keys
    attributes key should have a list of tuples inside, with the name of the attribute and description in each tuple
    Each var most have a data key equal to a numpy array (can be masked) and a attribute key
    Each var most have a dimensions key equal to a tuple, in the same order as the array's dimensions
    all attributes are tuples with name and description text
    :param output_filename: should include full path and extension
    :return: None
    """
    # check if dict_ has the right format
    if 'variables' in dict_.keys():
        pass
    else:
        print('dictionary has the wrong format, missing variables key')
        return
    if 'dimensions' in dict_.keys():
        pass
    else:
        print('dictionary has the wrong format, missing dimensions key')
        return
    if 'attributes' in dict_.keys():
        pass
    else:
        print('dictionary has the wrong format, missing attributes key')
        return
    # create dimension and variables lists
    vars_list = dict_['variables'].keys()
    dims_list = dict_['dimensions']
    # every dimension must also exist as a variable (its coordinate data)
    # NOTE(review): unlike the checks above, a missing dimension variable only
    # prints a message and does NOT return -- confirm this is intended
    for dim_ in dims_list:
        if dim_ in vars_list:
            pass
        else:
            print('dictionary has the wrong format, ' + dim_ + 'dimension is missing from variables')
    for var_ in vars_list:
        if 'dimensions' in dict_['variables'][var_].keys():
            pass
        else:
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its dimensions')
            return
        if 'attributes' in dict_['variables'][var_].keys():
            pass
        else:
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its attributes')
            return
    # create output file
    file_obj = nc.Dataset(output_filename,'w')#,format='NETCDF4_CLASSIC')
    print('output file started')
    # populate file's attributes
    for attribute_ in dict_['attributes']:
        setattr(file_obj, attribute_[0], attribute_[1])
    # create dimensions (the first listed dimension is made unlimited, size=0)
    for i_, dim_ in enumerate(dims_list):
        if i_ == 0:
            file_obj.createDimension(dim_, size=0)
        else:
            # find which axis of the coordinate variable corresponds to this dimension
            shape_index = np.argwhere(np.array(dict_['variables'][dim_]['dimensions']) == dim_)[0][0]
            file_obj.createDimension(dim_, dict_['variables'][dim_]['data'].shape[shape_index])
    print('dimensions created')
    # create variables
    for var_ in vars_list:
        print('creating', var_, 'variable')
        file_obj.createVariable(var_,
                                dict_['variables'][var_]['data'].dtype,
                                dict_['variables'][var_]['dimensions'], zlib=True)
        # populate variables
        file_obj.variables[var_][:] = dict_['variables'][var_]['data']
        for var_attr in dict_['variables'][var_]['attributes']:
            # a bare string means attributes were given as one (name, value)
            # pair instead of a list of tuples -- set it once and stop
            if isinstance(var_attr, str):
                setattr(file_obj.variables[var_], dict_['variables'][var_]['attributes'][0],
                        dict_['variables'][var_]['attributes'][1])
                break
            else:
                # fill value can only be set at variable creation time, skip it
                if var_attr[0] == '_FillValue' or var_attr[0] == 'fill_value':
                    pass
                else:
                    setattr(file_obj.variables[var_], var_attr[0], var_attr[1])
        print('created', var_, 'variable')
    print('storing data to disk and closing file')
    file_obj.close()
    print('Done!')
def load_netcdf_to_dictionary(filename_, var_list=None, time_tuple_start_stop_row=None, time_dimension_name='time'):
    """
    creates a dictionary from a netcdf file, with the following format
    :param filename_: filename with path of a netCDF4 file
    :param var_list: list of variables to be loaded, if none, all variables will be loaded
    :param time_tuple_start_stop_row: tuple with two time rows, time dimension will be trimmed r_1:r_2
    :param time_dimension_name: name of time dimension
    :return: dict_: have a dimensions key, a variables key, and a attributes key.
    Each var have a data key equal to a numpy array (can be masked) and a attributes key
    Each var have a dimensions key equal to a tuple, in the same order as the array's dimensions
    all attributes are tuples with name and description text
    """
    # create output dict
    out_dict = {}
    # open file
    file_obj = nc.Dataset(filename_, 'r')  # ,format='NETCDF4_CLASSIC')
    print('output file started')
    # get file's attr as (name, value) tuples
    file_att_list_tuple = []
    for attr_ in file_obj.ncattrs():
        file_att_list_tuple.append((attr_, file_obj.getncattr(attr_)))
    out_dict['attributes'] = file_att_list_tuple
    # get dimensions
    out_dict['dimensions'] = sorted(file_obj.dimensions)
    # get variables
    if var_list is None:
        var_list = sorted(file_obj.variables)
    out_dict['variables'] = {}
    # read each variable's data, attributes and dimensions
    for var_ in var_list:
        out_dict['variables'][var_] = {}
        # trim along the time dimension only when requested and applicable
        if time_tuple_start_stop_row is not None and time_dimension_name in file_obj.variables[var_].dimensions:
            out_dict['variables'][var_]['data'] = file_obj.variables[var_][time_tuple_start_stop_row[0]:
                                                                           time_tuple_start_stop_row[1]]
        else:
            out_dict['variables'][var_]['data'] = file_obj.variables[var_][:]
        # collect the variable's attributes as (name, value) tuples
        # (a dead assignment of the raw ncattrs() list was removed here)
        var_att_list_tuple = []
        for attr_ in file_obj.variables[var_].ncattrs():
            var_att_list_tuple.append((attr_, file_obj.variables[var_].getncattr(attr_)))
        out_dict['variables'][var_]['attributes'] = var_att_list_tuple
        out_dict['variables'][var_]['dimensions'] = file_obj.variables[var_].dimensions
        print('read variable', var_)
    file_obj.close()
    print('Done!')
    return out_dict
def merge_multiple_netCDF_by_time_dimension(directory_where_nc_file_are_in_chronological_order, output_path='',
                                            output_filename=None, time_variable_name='time', time_dimension_name=None,
                                            vars_to_keep=None, nonTimeVars_check_list=None,
                                            key_search_str='', seek_in_subfolders=False, force_file_list=None):
    """Concatenate a chronological series of netCDF files along the time dimension.

    The first file is copied (or rewritten with an unlimited time dimension) to
    the output file; every following file's time-dependent variables are then
    appended to it.

    :param directory_where_nc_file_are_in_chronological_order: folder holding the input .nc files
    :param output_path: folder for the merged file (ignored if output_filename is given)
    :param output_filename: full path of the merged file; if None it is derived from the first input file
    :param time_variable_name: name of the time variable
    :param time_dimension_name: name of the time dimension; defaults to time_variable_name
    :param vars_to_keep: optional list of variable names to copy; if None all variables are copied
    :param nonTimeVars_check_list: optional list of time-independent variables to compare across files
    :param key_search_str: substring used to filter input filenames
    :param seek_in_subfolders: also search subfolders when True
    :param force_file_list: explicit list of files to merge (overrides the directory search)
    :return: None
    """
    # build the sorted (chronological) list of files to merge
    if force_file_list is not None:
        file_list_all = sorted(force_file_list)
    else:
        if seek_in_subfolders:
            if key_search_str == '':
                file_list_all = sorted(list_files_recursive(directory_where_nc_file_are_in_chronological_order))
            else:
                file_list_all = sorted(list_files_recursive(directory_where_nc_file_are_in_chronological_order,
                                                            filter_str=key_search_str))
        else:
            file_list_all = sorted(glob.glob(str(directory_where_nc_file_are_in_chronological_order
                                                 + '*' + key_search_str + '*.nc')))
    print('Files to be merged (in this order):')
    parameter_list = ''
    for i, parameter_ in enumerate(file_list_all):
        parameter_list = str(parameter_list) + str(i) + " ---> " + str(parameter_) + '\n'
    print(parameter_list)
    # create copy of first file
    if output_filename is None:
        if output_path == '':
            output_filename = file_list_all[0][:-3] + '_merged.nc'
        else:
            output_filename = output_path + file_list_all[0].split('\\')[-1][:-3] + '_merged.nc'
    # define time variable and dimension
    if time_dimension_name is None:
        time_dimension_name = time_variable_name
    # check if time dimension is unlimited
    # NOTE(review): size == 0 is used as a proxy for "unlimited"; a non-empty
    # unlimited dimension would not pass this test -- confirm intended
    netcdf_first_file_object = nc.Dataset(file_list_all[0], 'r')
    # BUG FIX: capture the first file's variable names while it is open. The
    # original re-read them after close() and never defined variable_names_list
    # in the copy branch, which raised a NameError below.
    first_file_var_names = sorted(netcdf_first_file_object.variables)
    if netcdf_first_file_object.dimensions[time_dimension_name].size == 0 and vars_to_keep is None:
        # all good, just make copy of file with output_filename name
        variable_names_list = first_file_var_names
        netcdf_first_file_object.close()
        shutil.copyfile(file_list_all[0], output_filename)
        print('first file in merger list has unlimited time dimension, copy created with name:', output_filename)
    else:
        # not so good, create new file and copy everything from first, make time dimension unlimited...
        netcdf_output_file_object = nc.Dataset(output_filename, 'w')
        print('first file in merger list does not have unlimited time dimension, new file created with name:',
              output_filename)
        # copy main attributes
        attr_list = netcdf_first_file_object.ncattrs()
        for attr_ in attr_list:
            netcdf_output_file_object.setncattr(attr_, netcdf_first_file_object.getncattr(attr_))
        print('main attributes copied')
        # create list for dimensions and variables
        dimension_names_list = sorted(netcdf_first_file_object.dimensions)
        if vars_to_keep is None:
            variable_names_list = first_file_var_names
        else:
            variable_names_list = vars_to_keep
        # create dimensions (time is created with size=0, i.e. unlimited)
        for dim_name in dimension_names_list:
            if dim_name == time_dimension_name:
                netcdf_output_file_object.createDimension(time_dimension_name, size=0)
                print(time_variable_name, 'dimension created')
            else:
                netcdf_output_file_object.createDimension(dim_name,
                                                          size=netcdf_first_file_object.dimensions[dim_name].size)
                print(dim_name, 'dimension created')
        # create variables
        for var_name in variable_names_list:
            # create
            netcdf_output_file_object.createVariable(var_name,
                                                     netcdf_first_file_object.variables[var_name].dtype,
                                                     netcdf_first_file_object.variables[var_name].dimensions, zlib=True)
            print(var_name, 'variable created')
            # copy the attributes
            attr_list = netcdf_first_file_object.variables[var_name].ncattrs()
            for attr_ in attr_list:
                netcdf_output_file_object.variables[var_name].setncattr(attr_,
                                                                        netcdf_first_file_object.variables[
                                                                            var_name].getncattr(attr_))
            print('variable attributes copied')
            # copy the data to the new file
            netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:].copy()
            print('variable data copied')
            print('-=' * 20)
        # close all files
        netcdf_output_file_object.close()
        netcdf_first_file_object.close()
    print('starting to copy other files into merged file')
    vars_list = variable_names_list
    for filename_ in file_list_all[1:]:
        # open output file for appending data
        netcdf_output_file_object = nc.Dataset(output_filename, 'a')
        print('-' * 5)
        print('loading file:', filename_)
        # open hourly file
        netcdf_file_object = nc.Dataset(filename_, 'r')
        # get time array
        time_hourly = np.array(netcdf_file_object.variables[time_variable_name][:], dtype=float)
        row_start = netcdf_output_file_object.variables[time_variable_name].shape[0]
        row_end = time_hourly.shape[0] + row_start
        # append time array
        netcdf_output_file_object.variables[time_variable_name][row_start:row_end] = time_hourly
        # append all other variables that are time dependent
        for var_name in vars_list:
            if var_name != time_variable_name:
                if time_dimension_name in netcdf_output_file_object.variables[var_name].dimensions:
                    netcdf_output_file_object.variables[var_name][row_start:row_end] = \
                        netcdf_file_object.variables[var_name][:].copy()
        # check non time dependent variables for consistency
        vars_list_sub = sorted(netcdf_file_object.variables)
        if vars_list_sub != first_file_var_names:
            print('Alert! Variables in first file are different than other files')
            print('first file variables:')
            p_(first_file_var_names)
            print(filename_, 'file variables:')
            p_(vars_list_sub)
        if nonTimeVars_check_list is not None:
            for var_name in nonTimeVars_check_list:
                if np.nansum(np.abs(netcdf_file_object.variables[var_name][:].copy() -
                                    netcdf_output_file_object.variables[var_name][:].copy())) != 0:
                    print('Alert!', var_name, 'from file:', filename_, 'does not match the first file')
                    # store the mismatching values as a variable attribute for later inspection
                    netcdf_output_file_object.variables[var_name].setncattr(
                        'values from file ' + filename_, netcdf_file_object.variables[var_name][:].copy()
                    )
        netcdf_file_object.close()
        netcdf_output_file_object.close()
    print('done')
def load_netcdf_file_variable(filename_, variable_name_list=None):
    """Read attributes, dimensions and selected variables from a netCDF file.

    filename_: path of the netCDF file to read.
    variable_name_list: names of variables to load; when None, every variable
        in the file is loaded.
    Returns a 4-tuple of dicts:
        (file attributes, variable values, variable attributes, dimensions).

    NOTE(review): relies on the private '_attributes' mapping of the dataset
    and variable objects; that is not part of the public netCDF4 API — confirm
    the netCDF library bound to 'nc' exposes it.
    """
    netcdf_file_object = nc.Dataset(filename_, 'r')
    file_attributes_dict = {}
    file_var_values_dict = {}
    file_var_attrib_dict = {}
    file_dim_dict = {}
    if variable_name_list is None: variable_name_list = list(netcdf_file_object.variables)
    # global (file-level) attributes
    for atr_ in netcdf_file_object._attributes:
        file_attributes_dict[atr_] = netcdf_file_object._attributes[atr_]
    # dimension objects keyed by dimension name
    for dim_ in netcdf_file_object.dimensions:
        file_dim_dict[dim_] = netcdf_file_object.dimensions[dim_]
    for var_ in variable_name_list:
        file_var_values_dict[var_] = netcdf_file_object.variables[var_][:].copy()
        # NOTE(review): each pass of this loop overwrites the previous entry,
        # so only the variable's LAST attribute value survives — a nested
        # dict per variable was probably intended. TODO confirm with callers
        # before changing the returned structure.
        for atr_ in netcdf_file_object.variables[var_]._attributes:
            file_var_attrib_dict[var_] = netcdf_file_object.variables[var_]._attributes[atr_]
    netcdf_file_object.close()
    return file_attributes_dict, file_var_values_dict, file_var_attrib_dict, file_dim_dict
def save_array_list_as_netcdf(array_list, name_list, units_list, attributes_list, out_filename):
    """Save a list of numpy arrays as variables of a new netCDF file.

    array_list: numpy arrays of arbitrary (possibly differing) shapes.
    name_list: one variable name per array.
    units_list: text stored as each variable's 'units' attribute.
    attributes_list: iterable of (name, value) pairs stored as global
        file attributes.
    out_filename: path of the netCDF file to create (overwritten).
    """
    file_object = nc.Dataset(out_filename, 'w')
    for variable_ in range(len(array_list)):
        # one uniquely named dimension per axis of this array
        dim_list_name = []
        for dim_ in range(len(array_list[variable_].shape)):
            dim_name = str(variable_) + '_' + str(dim_)
            dim_list_name.append(dim_name)
            file_object.createDimension(dim_name, array_list[variable_].shape[dim_])
        # single-character numpy type code ('f', 'i', ...) for createVariable
        dtype_ = str(array_list[variable_].dtype)[0]
        file_object.createVariable(name_list[variable_], dtype_, tuple(dim_list_name))
        setattr(file_object.variables[name_list[variable_]], 'units', units_list[variable_])
        # BUGFIX: write the data INTO the file variable with a slice
        # assignment; the original plain '=' only rebound the variables-dict
        # entry to the numpy array, so no data was ever stored in the file
        file_object.variables[name_list[variable_]][:] = array_list[variable_][:]
    for atri_ in attributes_list:
        setattr(file_object, atri_[0], atri_[1])
    file_object.close()
def save_time_series_as_netcdf(array_list, name_list, units_list, attributes_list, out_filename):
    """Save 1-D arrays that share a common time axis as variables of a new netCDF file.

    array_list: 1-D numpy arrays, all with the length of the first one.
    name_list / units_list: per-array variable names and 'units' text.
    attributes_list: iterable of (name, value) pairs stored as global attributes.
    out_filename: path of the netCDF file to create (overwritten).
    """
    output_file = nc.Dataset(out_filename, 'w')
    # single shared time dimension, sized from the first series
    output_file.createDimension('time', array_list[0].shape[0])
    for series_index, series_array in enumerate(array_list):
        type_code = str(series_array.dtype)[0]
        # unicode dtypes ('<U...') are stored as single-byte strings
        if type_code == '<':
            type_code = 'S1'
        variable_name = name_list[series_index]
        output_file.createVariable(variable_name, type_code, ('time',))
        setattr(output_file.variables[variable_name], 'units', units_list[series_index])
        output_file.variables[variable_name][:] = series_array[:]
    for attribute_pair in attributes_list:
        setattr(output_file, attribute_pair[0], attribute_pair[1])
    output_file.close()
def save_emissions_to_new_netcdf(out_filename, emissions_array, pollutant_name, time_array, lat_array, lon_array,
                                 file_attributes_tuple_list, pollutant_attributes_tuple_list):
    """Write a (time, lat, lon) emissions field and its coordinate axes to a new netCDF file.

    out_filename: path of the file to create (overwritten).
    emissions_array: 3-D array ordered (time, lat, lon).
    pollutant_name: variable name for the emissions field.
    time_array / lat_array / lon_array: 1-D coordinate arrays.
    file_attributes_tuple_list: (name, value) pairs for global attributes.
    pollutant_attributes_tuple_list: (name, value) pairs for the pollutant variable.
    """
    output_file = nc.Dataset(out_filename, 'w')
    coordinate_axes = (('time', time_array), ('lat', lat_array), ('lon', lon_array))
    # dimensions first, then one coordinate variable per axis
    for axis_name, axis_array in coordinate_axes:
        output_file.createDimension(axis_name, axis_array.shape[0])
    for axis_name, axis_array in coordinate_axes:
        output_file.createVariable(axis_name, str(axis_array.dtype)[0], (axis_name,))
        output_file.variables[axis_name][:] = axis_array[:]
    # emissions field
    output_file.createVariable(pollutant_name, str(emissions_array.dtype)[0], ('time', 'lat', 'lon',))
    output_file.variables[pollutant_name][:] = emissions_array[:]
    # global and per-pollutant attributes
    for attribute_name, attribute_value in file_attributes_tuple_list:
        setattr(output_file, attribute_name, attribute_value)
    for attribute_name, attribute_value in pollutant_attributes_tuple_list:
        setattr(output_file.variables[pollutant_name], attribute_name, attribute_value)
    output_file.close()
def save_emissions_to_existing_netcdf(out_filename, emissions_array, pollutant_name, attributes_tuple_list):
    """Append one (time, lat, lon) pollutant variable to an existing netCDF file.

    The file must already define 'time', 'lat' and 'lon' dimensions matching
    the shape of emissions_array.
    """
    output_file = nc.Dataset(out_filename, 'a')
    output_file.createVariable(pollutant_name, str(emissions_array.dtype)[0], ('time', 'lat', 'lon',))
    pollutant_variable = output_file.variables[pollutant_name]
    pollutant_variable[:] = emissions_array[:]
    setattr(pollutant_variable, 'pollutant name', pollutant_name)
    for attribute_name, attribute_value in attributes_tuple_list:
        setattr(pollutant_variable, attribute_name, attribute_value)
    output_file.close()
def WRF_emission_file_modify(filename_, variable_name, cell_index_west_east, cell_index_south_north, new_value):
    """Overwrite a single grid cell of a WRF emission variable in place.

    The variable is assumed 4-D with the spatial grid in the last two axes
    (indexed [0, 0, south_north, west_east]); the replaced value is printed.
    """
    wrf_file = nc.Dataset(filename_, 'a')
    field_2d = wrf_file.variables[variable_name][0, 0, :, :].copy()
    # report the value being replaced before modifying it
    print(field_2d[cell_index_south_north, cell_index_west_east])
    field_2d[cell_index_south_north, cell_index_west_east] = new_value
    wrf_file.variables[variable_name][0, 0, :, :] = field_2d[:, :]
    wrf_file.close()
def find_wrf_3d_cell_from_latlon_to_south_north_west_east(lat_, lon_, wrf_output_filename,
                                                          wrf_lat_variablename='XLAT', wrf_lon_variablename='XLONG',
                                                          flatten_=False):
    """Locate the WRF grid cell nearest to (lat_, lon_) in planar degree space.

    Returns the flattened index when flatten_ is True, otherwise the
    (south_north, west_east) index pair.
    """
    wrf_file = nc.Dataset(wrf_output_filename, 'r')
    lat_grid = wrf_file.variables[wrf_lat_variablename][:, :].copy()
    lon_grid = wrf_file.variables[wrf_lon_variablename][:, :].copy()
    wrf_file.close()
    # planar degree-space distance (not great-circle), matching the original
    distance_grid = ((np.abs(lat_grid - lat_) ** 2) + (np.abs(lon_grid - lon_) ** 2)) ** 0.5
    nearest_flat_index = np.argmin(distance_grid)
    if flatten_:
        return nearest_flat_index
    return np.unravel_index(nearest_flat_index, distance_grid.shape)
# specialized tools
def vectorize_array(array_):
    """Flatten a 2-D array into an (n*m, 3) table of (row, column, value).

    array_: 2-D numpy array.
    Returns a float array with one output row per input cell, in row-major
    order.
    """
    output_array = np.zeros((array_.shape[0] * array_.shape[1], 3), dtype=float)
    num_columns = array_.shape[1]
    for r_ in range(array_.shape[0]):
        for c_ in range(num_columns):
            # BUGFIX: index by the flattened position; the original wrote to
            # row r_ for every c_, repeatedly overwriting it and leaving all
            # rows beyond the first array_.shape[0] as zeros
            flat_index = r_ * num_columns + c_
            output_array[flat_index, 0] = r_
            output_array[flat_index, 1] = c_
            output_array[flat_index, 2] = array_[r_, c_]
    return output_array
def exceedance_rolling(arr_time_seconds, arr_values, standard_, rolling_period, return_rolling_arrays=False):
    """Count rolling-mean exceedances of a standard and list the dates they occur.

    arr_time_seconds: sample times in epoch seconds (minute data assumed).
    arr_values: sample values, in the same units as standard_.
    standard_: threshold the rolling mean is compared against.
    rolling_period: window length (in hours) passed to row_average_rolling.
    return_rolling_arrays: when True, also return the hourly time stamps and
        the rolling-mean series.
    Returns (total exceedance count, unique exceedance dates[, times, rolling means]).
    """
    ## assumes data is in minutes and in same units as standard
    # hourly means first (min_data=45 requires 45 of 60 minutes present)
    time_secs_1h, values_mean_disc_1h = mean_discrete(arr_time_seconds, arr_values, 3600, arr_time_seconds[0], min_data=45)
    values_rolling_mean = row_average_rolling(values_mean_disc_1h, rolling_period)
    counter_array = np.zeros(values_rolling_mean.shape[0])
    counter_array[values_rolling_mean > standard_] = 1
    total_number_of_exceedances = np.sum(counter_array)
    #create date str array
    T_ = np.zeros((time_secs_1h.shape[0],5),dtype='<U32')
    for r_ in range(time_secs_1h.shape[0]):
        # x == x is False only for NaN, so this skips missing time stamps
        if time_secs_1h[r_] == time_secs_1h[r_]:
            # the split(',') yields a one-element list that numpy broadcasts
            # across the 5-wide row
            T_[r_] = time.strftime("%Y_%m_%d",time.gmtime(time_secs_1h[r_])).split(',')
    exceedance_date_list = []
    for r_, rolling_stamp in enumerate(values_rolling_mean):
        if rolling_stamp > standard_:
            exceedance_date_list.append(T_[r_])
    exc_dates_array = np.array(exceedance_date_list)
    exc_dates_array_unique = np.unique(exc_dates_array)
    if return_rolling_arrays:
        return total_number_of_exceedances, exc_dates_array_unique, time_secs_1h, values_rolling_mean
    else:
        return total_number_of_exceedances, exc_dates_array_unique
# ozonesonde and radiosonde related
def load_sonde_data(filename_, mode_='PBL'): ##Loads data and finds inversions, creates I_
    """Load a sonde CSV file and detect temperature-inversion layers.

    filename_: path of the sonde CSV file; the data header row starts with
        'Day_[GMT]' and the first six columns are date/time components.
    mode_: 'PBL' detects boundary-layer inversions below 4 km with lapse-rate
        thresholds (L_T > 7, L_RH < -20); any other value uses the stronger
        TSI thresholds (L_T > 20, L_RH < -200) between 4 and 8 km.
    Returns (H_, V_, time_days, time_seconds, I_, I_line, L_T, L_RH):
        column headers, data matrix (error flags as NaN), matplotlib day
        numbers, epoch seconds, per-row inversion flags, one
        (height, time, thickness) row per detected layer, and the temperature
        and relative-humidity gradients with height.

    NOTE(review): assumes column 1 is temperature, column 2 is RH and the
    last column is altitude (km ASL) — confirm against the file format.
    """
    # global V_, M_, H_, ASL_, time_header, I_, I_line
    # global ASL_avr, L_T, L_RH, time_string, time_days, time_seconds, year_, flight_name
    ## user defined variables
    delimiter_ = ','
    error_flag = -999999
    first_data_header = 'Day_[GMT]'
    day_column_number = 0
    month_column_number = 1
    year_column_number = 2
    hour_column_number = 3
    minute_column_number = 4
    second_column_number = 5
    # time_header = 'Local Time' # defining time header
    # main data array
    sample_data = filename_
    # look for data start (header size)
    with open(sample_data) as file_read:
        header_size = -1
        r_ = 0
        for line_string in file_read:
            if (len(line_string) >= len(first_data_header) and
                    line_string[:len(first_data_header)] == first_data_header):
                header_size = r_
                break
            r_ += 1
    if header_size == -1:
        print('no data found!')
        sys.exit()
    data_array = np.array(genfromtxt(sample_data,
                                     delimiter=delimiter_,
                                     skip_header=header_size,
                                     dtype='<U32'))
    # defining header and data arrays
    M_ = data_array[1:, 6:].astype(float)
    H_ = data_array[0, 6:]
    ASL_ = M_[:, -1]
    # year_ = data_array[1, year_column_number]
    ASL_[ASL_ == error_flag] = np.nan
    # defining time arrays: build 'D-M-Y_H:M:S' strings from the six
    # date/time columns of each data row
    time_str = data_array[1:, 0].astype('<U32')
    for r_ in range(time_str.shape[0]):
        time_str[r_] = (str(data_array[r_ + 1, day_column_number]) + '-' +
                        str(data_array[r_ + 1, month_column_number]) + '-' +
                        str(data_array[r_ + 1, year_column_number]) + '_' +
                        str(data_array[r_ + 1, hour_column_number]) + ':' +
                        str(data_array[r_ + 1, minute_column_number]) + ':' +
                        str(data_array[r_ + 1, second_column_number]))
    time_days = np.array([mdates.date2num(datetime.datetime.utcfromtimestamp(
        calendar.timegm(time.strptime(time_string_record, '%d-%m-%Y_%H:%M:%S'))))
        for time_string_record in time_str])
    time_seconds = time_days_to_seconds(time_days)
    V_ = M_.astype(float)
    V_[V_ == error_flag] = np.nan
    T_avr = np.ones(V_[:, 1].shape)
    RH_avr = np.ones(V_[:, 1].shape)
    ASL_avr = np.ones(V_[:, 1].shape)
    L_T = np.zeros(V_[:, 1].shape)
    L_RH = np.zeros(V_[:, 1].shape)
    I_ = np.zeros(V_[:, 1].shape)
    I_[:] = np.nan
    # rolling average of T RH and ASL
    mean_size = 7  # 5
    for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
        T_avr[r_] = np.nanmean(V_[r_ - mean_size: r_ + mean_size, 1])
        RH_avr[r_] = np.nanmean(V_[r_ - mean_size: r_ + mean_size, 2])
        ASL_avr[r_] = np.nanmean(ASL_[r_ - mean_size: r_ + mean_size])
    # vertical gradients of smoothed T and RH (per km of ascent)
    for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
        if (ASL_avr[r_ + 1] - ASL_avr[r_]) > 0:
            L_T[r_] = ((T_avr[r_ + 1] - T_avr[r_]) /
                       (ASL_avr[r_ + 1] - ASL_avr[r_]))
            L_RH[r_] = ((RH_avr[r_ + 1] - RH_avr[r_]) /
                        (ASL_avr[r_ + 1] - ASL_avr[r_]))
    # define location of inversion
    # PBL or TSI
    if mode_ == 'PBL':
        # flag rows where temperature rises sharply while RH drops
        for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
            if L_T[r_] > 7 and L_RH[r_] < -20:  # PBL = 7,20 / TSI = 20,200
                I_[r_] = 1
        # get one of I_ only per layer
        temperature_gap = .4  # kilometres
        I_line = np.zeros((1, 3))  # height, time, intensity
        if np.nansum(I_) > 1:
            r_ = -1
            while r_ < I_.shape[0] - mean_size:
                r_ += 1
                # start a layer at a flagged row below 4 km ASL
                if I_[r_] == 1 and ASL_avr[r_] < 4:
                    layer_temp = T_avr[r_]
                    layer_h = ASL_avr[r_]
                    layer_time = time_seconds[r_]
                    # layer tops where T has dropped temperature_gap below
                    # the layer base; its thickness is stored as intensity
                    for rr_ in range(r_, I_.shape[0] - mean_size):
                        if T_avr[rr_] < layer_temp - temperature_gap:
                            delta_h = ASL_avr[rr_] - layer_h
                            altitude_ = layer_h
                            stanking_temp = np.array([altitude_, layer_time, delta_h])
                            I_line = np.row_stack((I_line, stanking_temp))
                            r_ = rr_
                            break
            # drop the placeholder first row (or mark no-detection with NaN)
            if np.max(I_line[:, 0]) != 0:
                I_line = I_line[1:, :]
            else:
                I_line[:, :] = np.nan
    else:
        # TSI mode: stronger thresholds, layers restricted to 4-8 km ASL
        for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
            if L_T[r_] > 20 and L_RH[r_] < -200:  # PBL = 7,20 / TSI = 20,200
                I_[r_] = 1
        # get one of I_ only per layer
        temperature_gap = .4  # kilometres
        I_line = np.zeros((1, 3))  # height, time, intensity
        if np.nansum(I_) > 1:
            r_ = -1
            while r_ < I_.shape[0] - mean_size:
                r_ += 1
                if I_[r_] == 1 and 4 < ASL_avr[r_] < 8:
                    layer_temp = T_avr[r_]
                    layer_h = ASL_avr[r_]
                    layer_time = time_seconds[r_]
                    for rr_ in range(r_, I_.shape[0] - mean_size):
                        if T_avr[rr_] < layer_temp - temperature_gap:
                            delta_h = ASL_avr[rr_] - layer_h
                            altitude_ = layer_h
                            stanking_temp = np.array([altitude_, layer_time, delta_h])
                            I_line = np.row_stack((I_line, stanking_temp))
                            r_ = rr_
                            break
            if np.max(I_line[:, 0]) != 0:
                I_line = I_line[1:, :]
            else:
                I_line[:, :] = np.nan
    return H_, V_, time_days, time_seconds, I_, I_line, L_T, L_RH
def plot_X1_X2_Y(X1_blue, X2_green, Y):
    """Scatter two x-series against a shared y-axis.

    X1_blue is drawn in blue on the bottom x-axis, X2_green in green on a
    twinned top x-axis; a vertical zero line is added to each axis.
    """
    fig, ax1 = plt.subplots()
    ax2 = ax1.twiny()
    # BUGFIX: was ax1.plot(...), which does not accept the scatter-only
    # kwargs 's' and 'edgecolor' and raised at runtime; a scatter matching
    # the ax2 call was clearly intended
    ax1.scatter(X1_blue, Y, s=5, color='b', edgecolor='none')
    ax1.axvline(0, c='k')
    ax2.scatter(X2_green, Y, s=5, color='g', edgecolor='none')
    ax2.axvline(0, c='k')
    plt.show()
def plot_T_RH_I_(V_, I_line):
    """Plot temperature (blue) and RH (green) profiles against altitude.

    V_: sonde data matrix (column 1 = T, column 2 = RH, last column = ASL).
    I_line: inversion table; a red horizontal line marks each layer height.
    """
    fig, temperature_axis = plt.subplots()
    humidity_axis = temperature_axis.twiny()
    altitude_profile = V_[:, -1]
    temperature_axis.set_ylabel('ASL')
    temperature_axis.set_xlabel('Temp')
    humidity_axis.set_xlabel('RH')
    temperature_axis.scatter(V_[:, 1], altitude_profile, s=5, color='b', edgecolor='none')
    temperature_axis.axvline(0, c='k')
    humidity_axis.scatter(V_[:, 2], altitude_profile, s=5, color='g', edgecolor='none')
    humidity_axis.axvline(0, c='k')
    # mark each detected inversion height
    for inversion_index in range(I_line.shape[0]):
        plt.axhline(I_line[inversion_index, 0], c='r')
    plt.show()
def plot_ThetaVirtual_I_(V_, I_line):
    """Plot the virtual potential temperature profile against altitude.

    V_: sonde data matrix (column 5 = theta_v, last column = ASL).
    I_line: inversion table; a red horizontal line marks each layer height.
    """
    fig, profile_axis = plt.subplots()
    altitude_profile = V_[:, -1]
    profile_axis.set_ylabel('ASL')
    profile_axis.set_xlabel('Virtual Potential Temperature [K]')
    profile_axis.scatter(V_[:, 5], altitude_profile, s=5, color='b', edgecolor='none')
    for inversion_index in range(I_line.shape[0]):
        plt.axhline(I_line[inversion_index, 0], c='r')
    plt.show()
def last_lat_lon_alt_ozonesonde(filename_):
    """Return (lat, lon, alt, time) strings from the final record of an ozonesonde CSV.

    The first 23 lines are skipped as metadata; columns 31/32/33 hold
    latitude/longitude/altitude and column 0 the time stamp.
    """
    sonde_table = genfromtxt(filename_, delimiter=',', dtype='<U32', skip_header=23)
    last_record = sonde_table[-1]
    return last_record[31], last_record[32], last_record[33], last_record[0]
def load_khancoban_sondes(filename_):
    """Parse a Khancoban sonde text file into a dict of profile variables.

    Lines 17-35 (0-based) hold 'name unit' header pairs; data rows start at
    line 39. Returns a dict with 'filename', 'date' and, per variable name,
    a {'data': 1-D float array, 'units': str} entry.
    """
    sonde_dict = {}
    base_name = filename_.split('\\')[-1]
    sonde_dict['filename'] = base_name
    # file names encode the date with the century dropped
    sonde_dict['date'] = '20' + base_name[2:]
    header_names = []
    header_units = []
    data_rows = []
    with open(filename_) as sonde_file:
        for line_index, line_text in enumerate(sonde_file):
            tokens = line_text.split()
            if 17 <= line_index <= 35:
                header_names.append(tokens[0])
                header_units.append(tokens[1])
            elif line_index >= 39 and len(tokens) > 1:
                data_rows.append(tokens)
    profile_array = np.array(data_rows, dtype=float)
    for column_index, variable_name in enumerate(header_names):
        sonde_dict[variable_name] = {'data': profile_array[:, column_index],
                                     'units': header_units[column_index]}
    return sonde_dict
def convert_khan_sonde_data_to_skewt_dict(khan_dict, sonde_name):
    """Repackage one Khancoban sonde record into the dict layout used by SkewT plots.

    khan_dict: dict of sonde records as built by load_khancoban_sondes.
    sonde_name: key of the record to convert.
    Returns a dict with keys hght/pres/temp/dwpt/sknt/drct/relh/time/lati/long;
    temperatures are converted K->C, wind speed m/s->knots, and the time axis
    is absolute epoch seconds (launch time plus per-sample offsets).
    """
    sonde_record = khan_dict[sonde_name]
    # absolute launch time in epoch seconds plus the per-sample offsets
    launch_seconds = time_str_to_seconds(sonde_record['date'], '%Y%m%d.0%H')
    sample_seconds = launch_seconds + sonde_record['time']['data']
    skewt_dict = {
        'hght': sonde_record['Height']['data'],
        'pres': sonde_record['P']['data'],
        'temp': kelvin_to_celsius(sonde_record['T']['data']),
        'dwpt': kelvin_to_celsius(sonde_record['TD']['data']),
        'sknt': ws_ms_to_knots(sonde_record['FF']['data']),
        'drct': sonde_record['DD']['data'],
        'relh': sonde_record['RH']['data'],
        'time': sample_seconds,
        'lati': sonde_record['Lat']['data'],
        'long': sonde_record['Lon']['data'],
    }
    return skewt_dict
# data averaging
def average_all_data_files(filename_, number_of_seconds, WD_index = None, WS_index = None,
                           min_data_number=None, cumulative_parameter_list=None):
    """Time-average a stored data file and save it as <name>_X_minute_mean.csv.

    filename_: CSV produced by the storage routines; the first two columns
        are time stamps, data starts at column 2.
    number_of_seconds: averaging window length in seconds.
    WD_index / WS_index: optional column indexes of wind direction and speed;
        when both are given the wind is averaged via cartesian components so
        the circular mean is handled correctly.
    min_data_number: minimum samples per window (default: 75% of the window,
        assuming minute data).
    cumulative_parameter_list: column indexes to accumulate instead of average.
    """
    header_, values_ = load_time_columns(filename_)
    time_sec = time_days_to_seconds(values_[:,0])
    # wind treatment: convert direction/speed to north/east components first
    if WD_index is not None and WS_index is not None:
        print('wind averaging underway for parameters: ' + header_[WD_index] + ' and ' + header_[WS_index])
        # converting wind parameters to cartesian
        WD_ = values_[:,WD_index]
        WS_ = values_[:,WS_index]
        North_, East_ = polar_to_cart(WD_, WS_)
        values_[:,WD_index] = North_
        values_[:,WS_index] = East_
    # averaging (columns 0-1 are time stamps, hence the offsets of 2 below)
    if min_data_number is None: min_data_number = int(number_of_seconds/60 * .75)
    if cumulative_parameter_list is None:
        Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
                                               time_sec[0], min_data = min_data_number,
                                               cumulative_parameter_indx= None)
    else:
        Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
                                               time_sec[0], min_data = min_data_number,
                                               cumulative_parameter_indx=np.array(cumulative_parameter_list) - 2)
    if WD_index is not None and WS_index is not None:
        # converting the averaged components back to polar direction/speed
        North_ = Values_mean[:,WD_index - 2]
        East_ = Values_mean[:,WS_index - 2]
        WD_, WS_ = cart_to_polar(North_, East_)
        Values_mean[:,WD_index - 2] = WD_
        Values_mean[:,WS_index - 2] = WS_
    output_filename = filename_.split('.')[0]
    output_filename += '_' + str(int(number_of_seconds/60)) + '_minute_mean' + '.csv'
    save_array_to_disk(header_[2:], Index_mean, Values_mean, output_filename)
    print('Done!')
    print('saved at: ' + output_filename)
def median_discrete(Index_, Values_, avr_size, first_index, min_data=1, position_=0.0):
# Index_: n by 1 numpy array to look for position,
# Values_: n by m numpy array, values to be averaged
# avr_size in same units as Index_,
# first_index is the first discrete index on new arrays.
# min_data is minimum amount of data for average to be made (optional, default = 1)
# position_ will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
# this will average values from Values_ that are between Index_[n:n+avr_size)
# will return: Index_averaged, Values_averaged
# checking if always ascending to increase efficiency
always_ascending = 1
for x in range(Index_.shape[0]-1):
if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
if Index_[x+1] < Index_[x]:
always_ascending = 0
if always_ascending == 0:
MM_ = np.column_stack((Index_,Values_))
MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
Index_ = MM_sorted[:,0]
Values_ = MM_sorted[:,1:]
# error checking!
if Index_.shape[0] != Values_.shape[0]:
return None, None
if Index_[-1] < first_index:
return None, None
if min_data < 1:
return None, None
# initialize averaged matrices
final_index = np.nanmax(Index_)
total_averaged_rows = int((final_index-first_index)/avr_size) + 1
if len(Values_.shape) == 1:
Values_median = | np.zeros(total_averaged_rows) | numpy.zeros |
#!/usr/bin/env python
"""
This script grew-up from test of specific issue -
2021-10-18:
Xiaozhe complains that too many pixels outside signal region in ueddaq02 r401 shows up in selection of intensities between 100 and 500 keV.
See:
- `github: <https://github.com/slac-lcls/lcls2>`_.
- `confluence: <https://confluence.slac.stanford.edu/display/PSDM/EPIXQUAD+ueddaq02+r401+issue+calib+hot+banks+2021-10-18>`_.
Created on 2021-10-18 by <NAME>
"""
import sys
# derive the script name for usage/help messages and bail out when called
# without any parameters
SCRNAME = sys.argv[0].rsplit('/')[-1]
if len(sys.argv)<2: sys.exit( '\nMISSING PARAMETERS\nTry: %s -h' % SCRNAME)
import math
import numpy as np
import logging
logger = logging.getLogger(__name__)
DICT_NAME_TO_LEVEL = logging._nameToLevel # {'INFO': 20, 'WARNING': 30, 'WARN': 30,...
from psana.pyalgos.generic.NDArrUtils import info_ndarr, divide_protected
from psana import DataSource
from psana.detector.UtilsGraphics import gr, fleximagespec#, fleximage, flexhist
from psana.detector.UtilsEpix10ka import event_constants
import argparse
# command-line usage text listing the supported test numbers
USAGE = '\n %s -h' % SCRNAME\
      + '\n %s -r554 -t1' % SCRNAME\
      + '\n %s -e ueddaq02 -d epixquad -r554 -t1' % SCRNAME\
      + '\n -t, --tname - test name/number:'\
      + '\n 1 - segment numeration'\
      + '\n 2 - gain range index'\
      + '\n 3 - gain, ADU/keV'\
      + '\n 4 - pedestals'\
      + '\n 5 - rms'\
      + '\n 6 - raw'\
      + '\n 7 - raw-peds'\
      + '\n 8 - (raw-peds)/gain, keV'\
      + '\n 9 - calib, keV'\
      + '\n 10 - status'\
      + '\n 11 - gain factor = 1/gain, keV/ADU'\
      + '\n ----'\
      + '\n 21 - run 401 two-threshold selection issue'\
      + '\n 22 - (raw-peds)/gain, keV hot - specific isuue test'\
      + '\n 23 - (raw-peds)/gain, keV cold - specific isuue test'
# default values for every command-line option
d_tname = '0'
d_detname = 'epixquad'
d_expname = 'ueddaq02'
d_run = 554
d_events = 5
d_evskip = 0
d_stepnum = None
d_saveimg = False
d_grindex = None
d_amin = None
d_amax = None
d_cframe = 0
d_loglev = 'INFO'
# command-line interface
parser = argparse.ArgumentParser(usage=USAGE, description='%s - test per-event components of the det.raw.calib method'%SCRNAME)
parser.add_argument('-t', '--tname', default=d_tname, type=str, help='test name, def=%s' % d_tname)
parser.add_argument('-d', '--detname', default=d_detname, type=str, help='detector name, def=%s' % d_detname)
parser.add_argument('-e', '--expname', default=d_expname, type=str, help='experiment name, def=%s' % d_expname)
parser.add_argument('-r', '--run', default=d_run, type=int, help='run number, def=%s' % d_run)
parser.add_argument('-N', '--events', default=d_events, type=int, help='maximal number of events, def=%s' % d_events)
parser.add_argument('-K', '--evskip', default=d_evskip, type=int, help='number of events to skip in the beginning of run, def=%s' % d_evskip)
parser.add_argument('-s', '--stepnum', default=d_stepnum, type=int, help='step number counting from 0 or None for all steps, def=%s' % d_stepnum)
parser.add_argument('-S', '--saveimg', default=d_saveimg, action='store_true', help='save image in file, def=%s' % d_saveimg)
parser.add_argument('-g', '--grindex', default=d_grindex, type=int, help='gain range index [0,6] for peds, gains etc., def=%s' % str(d_grindex))
parser.add_argument('-l', '--loglev', default=d_loglev, type=str, help='logger level (DEBUG, INFO, WARNING, etc.), def.=%s' % str(d_loglev))
parser.add_argument('--amin', default=d_amin, type=float, help='spectrum minimal value, def=%s' % str(d_amin))
parser.add_argument('--amax', default=d_amax, type=float, help='spectrum maximal value, def=%s' % str(d_amax))
parser.add_argument('--cframe', default=d_cframe, type=int, help='coordinate frame for images 0/1 for psana/LAB, def=%s' % str(d_cframe))
args = parser.parse_args()
print('*** parser.parse_args: %s' % str(args))
# logging configuration; noisy third-party loggers are raised to WARNING/INFO
logging.basicConfig(format='[%(levelname).1s] %(name)s L%(lineno)04d: %(message)s', level=DICT_NAME_TO_LEVEL[args.loglev])
logging.getLogger('matplotlib').setLevel(logging.WARNING)
logging.getLogger('psana.psexp.event_manager').setLevel(logging.INFO)
tname = args.tname # sys.argv[1] if len(sys.argv) > 1 else '0'
# selection window (ADU/keV) and default spectrum limits for the tests below
THRMIN = 100
THRMAX = 500
AMIN = 1
AMAX = 200
CROP1_IMG = False
CROP2_IMG = False
flims = None
fname = 'ims.png'
def selection(arr):
    """Keep values strictly inside the (THRMIN, THRMAX) window; zero the rest."""
    inside_window = (arr > THRMIN) & (arr < THRMAX)
    return np.where(inside_window, arr, 0)
def amin_amax(args, amin_def=None, amax_def=None):
    """Resolve spectrum limits: explicit --amin/--amax override the defaults.

    BUGFIX: uses 'is None' tests so an explicit 0 (or 0.0) limit is honored;
    the original truthiness tests silently replaced a zero limit with the
    default.
    """
    amin = amin_def if args.amin is None else args.amin
    amax = amax_def if args.amax is None else args.amax
    return amin, amax
# open the data source and take the first run's detector interface
ds = DataSource(exp=args.expname, run=args.run)
orun = next(ds.runs())
det = orun.Detector(args.detname)
MDB = det.raw._data_bit_mask # M14 if det.raw._dettype == 'epix10ka' else M15
# file-name prefix for any saved images
prefix = 'ims-%s-r%04d' % (orun.expt, orun.runnum)
print('*** det.raw._data_bit_mask_: %s' % oct(MDB))
print('*** det.raw._calibconst.keys:', det.raw._calibconst.keys())
print('*** pedestal metadata:', det.raw._calibconst['pedestals'][1])
print('*** gain metadata:', det.raw._calibconst['pixel_gain'][1])
#print('*** rms metadata:', det.raw._calibconst['pixel_rms'][1])
#print('*** status metadata:', det.raw._calibconst['pixel_status'][1])
# calibration constant arrays ([0] = data, [1] = metadata)
peds = det.raw._calibconst['pedestals'][0]
gain = det.raw._calibconst['pixel_gain'][0]
rms = det.raw._calibconst['pixel_rms'][0]
status = det.raw._calibconst['pixel_status'][0]
print(info_ndarr(peds,'pedestals'))
print(info_ndarr(rms,'rms'))
print(info_ndarr(gain,'gain, ADU/keV'))
# per-event working state used by the event loop below
arr, img = None, None
suffix = ''
evt_peds, evt_gfac = None, None
for nstep,step in enumerate(orun.steps()):
if args.stepnum is not None and nstep<args.stepnum:
print('skip nstep %d < stepnum=%d' % (nstep, args.stepnum))
continue
if args.stepnum is not None and nstep>args.stepnum:
print('break at nstep %d > stepnum=%d' % (nstep, args.stepnum))
break
print('=== Step %d' % nstep)
for nevt,evt in enumerate(step.events()):
if nevt>args.events:
print('break at nevt %d' % nevt)
break
if nevt<args.evskip:
print('skip nevt %d' % nevt)
continue
if tname in ('4', '7', '8', '22', '23'):
evt_peds = peds[args.grindex,:] if args.grindex is not None else\
event_constants(det.raw, evt, peds) #(7, 4, 352, 384) -> (4, 352, 384)
print(info_ndarr(evt_peds,'evt_peds'))
if tname in ('8', '11', '22', '23'):
gfac = divide_protected( | np.ones_like(gain) | numpy.ones_like |
import loader as ld
import fun_basicas as fun
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from scipy.optimize import minimize
def coste(theta1, theta2, X, Y, num_etiquetas):  # Y preparada
    """Cross-entropy cost of the 3-layer network over the training set.

    theta1/theta2: weight matrices; X: inputs (m x n); Y: one-hot labels.
    num_etiquetas: number of output classes (kept for interface parity).
    """
    A1, A2, h = forward_prop(X, theta1, theta2)
    # BUGFIX: the epsilon guards BOTH logs against log(0); the original only
    # protected the (1 - h) term, so h == 0 still produced -inf/NaN costs
    sum1 = Y * np.log(h + 1e-6)
    sum2 = (1 - Y) * np.log(1 - h + 1e-6)
    return (-1 / X.shape[0]) * np.sum(sum1 + sum2)
def coste_reg(theta1, theta2, X, Y, num_etiquetas, Lambda):
    """L2-regularized cost: cross-entropy plus a penalty on non-bias weights."""
    base_cost = coste(theta1, theta2, X, Y, num_etiquetas)
    num_examples = X.shape[0]
    # bias columns (index 0) are excluded from the penalty
    weight_penalty = sum(sum(theta1[:, 1:] ** 2)) + sum(sum(theta2[:, 1:] ** 2))
    return base_cost + (Lambda / (2 * num_examples)) * weight_penalty
def forward_prop(X, theta1, theta2):
    """One forward pass through the 3-layer network.

    Returns (inputs with bias column, hidden activations with bias column,
    output activations).
    """
    num_examples = X.shape[0]
    bias_column = np.ones([num_examples, 1])
    # input layer with the bias term prepended
    X = np.hstack([bias_column, X])
    # hidden layer: sigmoid through the first weight matrix, plus its own bias
    Oculta = np.hstack([bias_column, fun.sigmoide(np.dot(X, theta1.T))])
    # output layer through the second weight matrix
    Resultado = fun.sigmoide(np.dot(Oculta, theta2.T))
    return X, Oculta, Resultado
def gradiente(theta1, theta2, X, y):
    """Backpropagation gradients (unregularized) of the 3-layer network.

    Returns (Delta1, Delta2): the average gradient for each weight matrix.
    """
    # Accumulators with the same shape as each theta, initialized to zero
    Delta1 = np.zeros(np.shape(theta1))
    Delta2 = np.zeros(np.shape(theta2))
    m = len(y)
    # Forward pass over the whole training set
    A1, A2, h = forward_prop(X, theta1, theta2)
    # Backward pass, one training example at a time, accumulating the error
    for k in range(m):
        a1k = A1[k, :]
        a2k = A2[k, :]
        a3k = h[k, :]
        yk = y[k, :]
        d3 = a3k - yk
        # sigmoid derivative evaluated at the hidden activations
        g_prima = (a2k * (1 - a2k))
        d2 = np.dot(theta2.T, d3) * g_prima
        # d2[1:] drops the bias unit's error term
        Delta1 = Delta1 + np.dot(d2[1:, np.newaxis], a1k[np.newaxis, :])
        Delta2 = Delta2 + np.dot(d3[:, np.newaxis], a2k[np.newaxis, :])
    # The averaged accumulators are the gradients
    return Delta1 / m, Delta2 / m
def gradiente_reg(theta1, theta2, X, y, Lambda):
    """Gradient with L2 regularization added to every non-bias weight."""
    num_examples = len(y)
    Delta1, Delta2 = gradiente(theta1, theta2, X, y)
    regularization_factor = Lambda / num_examples
    # the first column holds bias weights and is left unregularized
    Delta1[:, 1:] += regularization_factor * theta1[:, 1:]
    Delta2[:, 1:] += regularization_factor * theta2[:, 1:]
    return Delta1, Delta2
def backprop(params_rn, num_entradas, num_ocultas, num_etiquetas, X, y, reg):
    """Return (regularized cost, flat gradient) for a 3-layer network.

    params_rn: flattened concatenation of theta1 and theta2.
    X is (m, num_entradas) and y is (m, num_etiquetas); the network has
    num_ocultas hidden units and num_etiquetas outputs.
    """
    # unroll the flat parameter vector back into the two weight matrices
    split_point = num_ocultas * (num_entradas + 1)
    theta1 = np.reshape(params_rn[:split_point], (num_ocultas, num_entradas + 1))
    theta2 = np.reshape(params_rn[split_point:], (num_etiquetas, num_ocultas + 1))
    D1, D2 = gradiente_reg(theta1, theta2, X, y, reg)
    cost_value = coste_reg(theta1, theta2, X, y, num_etiquetas, reg)
    flat_gradient = np.concatenate((np.ravel(D1), np.ravel(D2)))
    return cost_value, flat_gradient
def prueba_neurona(X, y, theta1, theta2):
"""función que devuelve el porcentaje de acierto de una red neuronal utilizando unas matrices de pesos dadas"""
n = len(y)
y = | np.ravel(y) | numpy.ravel |
import logging
import functools
import os
import shelve
import pandas as pd
import numpy as np
import seaborn as sns
import itertools
import matplotlib.pyplot as plt
import matplotlib
import math
from natsort import natsorted
from decimal import Decimal
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
from matplotlib import colors
import matplotlib.lines as mlines
from matplotlib.ticker import AutoMinorLocator
from matplotlib.gridspec import GridSpec
import matplotlib.gridspec as gridspec
from matplotlib.patches import Rectangle
from scipy.stats import ttest_ind
from statsmodels.stats.multitest import multipletests
from inspect import getmembers, isclass
from .config import FilterChoice
logger = logging.getLogger(__name__)
# map matplotlib color codes to the default seaborn palette
sns.set()
sns.set_color_codes()
_ = plt.plot([0, 1], color='r')
sns.set_color_codes()
_ = plt.plot([0, 2], color='b')
sns.set_color_codes()
_ = plt.plot([0, 3], color='g')
sns.set_color_codes()
_ = plt.plot([0, 4], color='m')
sns.set_color_codes()
_ = plt.plot([0, 5], color='y')
plt.close('all')
# Pipeline module order, to be filled in by the @module decorator.
pipeline_modules = []
pipeline_module_names = []
def module(func):
    """
    Annotation for pipeline module functions.

    Registers the given function in the pipeline-module registry and wraps
    it so every call is logged with a pre/post banner.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        banner = "=" * 70
        logger.info(banner)
        logger.info("RUNNING MODULE: %s", func.__name__)
        result = func(*args, **kwargs)
        logger.info(banner)
        logger.info("")
        return result
    pipeline_modules.append(wrapper)
    pipeline_module_names.append(wrapper.__name__)
    return wrapper
def save_data(path, data, **kwargs):
    """Write *data* to *path* as CSV, creating parent directories as needed.

    Extra keyword arguments are forwarded to ``pandas.DataFrame.to_csv``.
    Raises ValueError when *path* does not end in ``.csv``.
    """
    has_csv_suffix = path.name.endswith(".csv")
    if not has_csv_suffix:
        raise ValueError("Path must end with .csv")
    path.parent.mkdir(parents=True, exist_ok=True)
    data.to_csv(path, **kwargs)
def save_figure(path, figure=None, **kwargs):
    """Save a matplotlib figure as PDF, creating all intermediate directories.

    When *figure* is omitted, the current matplotlib figure is used.
    Extra keyword arguments are forwarded to ``savefig``.
    Raises ValueError when *path* does not end in ``.pdf``.
    """
    if not path.name.endswith(".pdf"):
        raise ValueError("Path must end with .pdf")
    target_figure = plt.gcf() if figure is None else figure
    path.parent.mkdir(parents=True, exist_ok=True)
    target_figure.savefig(str(path), **kwargs)
def open_dashboards(path, **kwargs):
    """Change into *path* and open the dashboards shelve stored there.

    Returns the open shelf (writeback enabled) as a dict-like object.
    NOTE: changes the process working directory as a side effect.
    """
    os.chdir(path)
    return shelve.open('dashboards.shelve', writeback=True)
def categorical_cmap(numUniqueSamples, uniqueSampleNames, numCatagories,
reverseSampleOrder=False, flipColorOrder=False,
cmap='seaborn_default', continuous=False):
"""
Generate a categorical colormap of length numUniqueSamples.
cmap = 'tab10', 'seaborn_default', etc.
"""
if cmap == 'seaborn_default':
channel_color_list = sns.color_palette()
else:
# specify and apply color list index order
base_colors = plt.get_cmap(cmap)
channel_color_list = [base_colors(i) for i in range(base_colors.N)]
color_order = [3, 0, 2, 4, 8, 6, 1, 5, 9, 7]
if flipColorOrder:
color_order = | np.flip(color_order) | numpy.flip |
import numpy as np
# X: a standard data array (d by n)
# y: a standard labels row vector (1 by n)
# iters: the number of updates to perform on weights WW
# lrate: the learning rate used
# K: the mini-batch size to be used
import math
class Sequential:
    """A feed-forward network as an ordered list of modules plus a loss.

    Each module must provide forward/backward/sgd_step; the loss must
    provide forward/backward.
    """
    def __init__(self, modules, loss):
        # modules: layers applied in order; loss: final loss module
        self.modules = modules
        self.loss = loss
    def mini_gd(self, X, Y, iters, lrate, notif_each=None, K=10):
        """Mini-batch gradient descent: *iters* updates with batch size *K*.

        X: data (d by n); Y: labels (1 by n); lrate: learning rate.
        The RNG is seeded, so training is deterministic for fixed inputs.
        """
        D, N = X.shape
        np.random.seed(0)
        num_updates = 0
        indices = np.arange(N)
        while num_updates < iters:
            # reshuffle the column order once per pass over the data
            np.random.shuffle(indices)
            X = X[:,indices] # Your code
            Y = Y[:,indices] # Your code
            for j in range(math.floor(N/K)):
                if num_updates >= iters: break
                # Implement the main part of mini_gd here
                # slice out the j-th mini-batch of K columns
                Xt = X[:,(j*K):(j+1)*K] # Your code
                Yt = Y[:,(j*K):(j+1)*K] # Your code
                # The rest of this function should be similar to your
                # implementation of Sequential.sgd in HW 7
                # Your code
                # forward pass, loss, backward pass, then one SGD step
                Ypred= self.forward(Xt)
                loss= self.loss.forward(Ypred,Yt)
                dLdZ= self.loss.backward()
                self.backward(dLdZ)
                self.sgd_step(lrate)
                num_updates += 1
    def forward(self, Xt):
        """Run Xt through every module in order and return the output."""
        for m in self.modules: Xt = m.forward(Xt)
        return Xt
    def backward(self, delta):
        """Propagate the loss gradient back through the modules in reverse."""
        for m in self.modules[::-1]: delta = m.backward(delta)
    def sgd_step(self, lrate):
        """Apply one gradient-descent step to every module's parameters."""
        for m in self.modules: m.sgd_step(lrate)
class BatchNorm(Module):
def __init__(self, m):
| np.random.seed(0) | numpy.random.seed |
import codecs
import copy
import gc
import json
import logging
import os
import pickle
import random
import time
from typing import List, Tuple, Union
# %%
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# %%
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.metrics import median_absolute_error, r2_score
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import Binarizer, KBinsDiscretizer
import tensorflow as tf
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.utils import losses_utils, tf_utils
from tensorflow.python.ops.losses import util as tf_losses_util
import tensorflow_addons as tfa
import tensorflow_probability as tfp
import umap
# %%
from assessment import calc_uncertainty_regection_curve, f_beta_metrics
from uncertainty import ensemble_uncertainties_regression
# %%
class LossFunctionWrapper(tf.keras.losses.Loss):
    """Wraps a plain loss function `fn(y_true, y_pred, **kwargs)` as a Keras Loss.

    Mirrors the private Keras wrapper so that a custom functional loss can be
    used anywhere a `tf.keras.losses.Loss` instance is expected.
    """

    def __init__(self,
                 fn,
                 reduction=losses_utils.ReductionV2.AUTO,
                 name=None,
                 **kwargs):
        # kwargs are stored and forwarded to `fn` on every call.
        super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
        self.fn = fn
        self._fn_kwargs = kwargs

    def call(self, y_true, y_pred):
        """Invoke the wrapped function, aligning tensor ranks first."""
        if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
            y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(y_pred, y_true)
        return self.fn(y_true, y_pred, **self._fn_kwargs)

    def get_config(self):
        """Serialize the stored kwargs (evaluating tensors/variables)."""
        config = {}
        # BUG FIX: the original iterated with six.iteritems(), but `six` is
        # never imported in this module, so get_config() raised NameError.
        # Plain dict.items() is the Python 3 equivalent.
        for k, v in self._fn_kwargs.items():
            config[k] = tf.keras.backend.eval(v) if tf_utils.is_tensor_or_variable(v) \
                else v
        base_config = super(LossFunctionWrapper, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# %%
def npairs_loss(labels, feature_vectors):
    """Temperature-scaled N-pairs contrastive loss on a batch of embeddings.

    Embeddings are L2-normalized, all pairwise similarities are divided by a
    temperature of 0.5, and the resulting logits are scored with
    tfa.losses.npairs_loss against the (squeezed) labels.
    """
    feature_vectors_normalized = tf.math.l2_normalize(feature_vectors, axis=1)
    logits = tf.divide(
        tf.matmul(
            feature_vectors_normalized, tf.transpose(feature_vectors_normalized)
        ),
        0.5  # temperature
    )
    return tfa.losses.npairs_loss(tf.squeeze(labels), logits)
# %%
class NPairsLoss(LossFunctionWrapper):
    """Keras Loss object wrapping `npairs_loss`.

    NOTE(review): the default name 'm_pairs_loss' looks like a typo for
    'n_pairs_loss'; it is a runtime identifier, so it is kept as-is.
    """
    def __init__(self, reduction=losses_utils.ReductionV2.AUTO,
                 name='m_pairs_loss'):
        super(NPairsLoss, self).__init__(npairs_loss, name=name,
                                         reduction=reduction)
# %%
def build_preprocessor(X: np.ndarray, colnames: List[str]) -> Pipeline:
    """Profile the features of X and fit a full preprocessing Pipeline on it.

    A throwaway impute+min-max pass first quantizes every column to integers
    so distinct values can be counted exactly; each column is then classified
    as binary (2 values), categorical (>2 values) or constant (dropped).
    The returned pipeline imputes, min-max scales, discretizes/binarizes the
    surviving columns, drops zero-variance outputs, standardizes and finally
    applies PCA; it is returned already fitted on X.

    Args:
        X: 2-D array of raw feature values (samples x features).
        colnames: column names aligned with X's second axis (used for log
            messages and transformer names).

    Returns:
        The fitted sklearn Pipeline.

    Raises:
        ValueError: if every feature is constant.
    """
    # Probe pass: impute + scale, then quantize to int32 so exact
    # distinct-value counting is robust to floating-point noise.
    X_ = Pipeline(steps=[
        (
            'imputer', SimpleImputer(
                missing_values=np.nan, strategy='constant',
                fill_value=-1.0
            )
        ),
        (
            'scaler',
            MinMaxScaler()
        )
    ]).fit_transform(X)
    X_ = np.rint(X_ * 100000.0).astype(np.int32)
    binary_features = dict()
    categorical_features = dict()
    removed_features = []
    for col_idx in range(X.shape[1]):
        values = set(X_[:, col_idx].tolist())
        print(f'Column {col_idx} "{colnames[col_idx]}" has ' \
              f'{len(values)} unique values.')
        if len(values) > 1:
            if len(values) < 3:
                # NOTE(review): the stored minimum is never read later; only
                # the keys of binary_features are used — confirm intent.
                binary_features[col_idx] = np.min(X[:, col_idx])
            else:
                categorical_features[col_idx] = len(values)
        else:
            # Constant column: carries no information, drop it.
            removed_features.append(col_idx)
        del values
    del X_
    all_features = set(range(X.shape[1]))
    useful_features = sorted(list(all_features - set(removed_features)))
    if len(useful_features) == 0:
        raise ValueError('Training inputs are bad. All features are removed.')
    print(f'There are {X.shape[1]} features.')
    if len(removed_features) > 0:
        print(f'These features will be removed: ' \
              f'{[colnames[col_idx] for col_idx in removed_features]}.')
    transformers = []
    if (len(categorical_features) > 0) and (len(binary_features) > 0):
        print(f'There are {len(categorical_features)} categorical ' \
              f'features and {len(binary_features)} binary features.')
    elif len(categorical_features) > 0:
        print(f'There are {len(categorical_features)} categorical features.')
    else:
        print(f'There are {len(binary_features)} binary features.')
    # Categorical columns: ordinal-encoded bins; bin count grows with the
    # number of distinct values but is clamped to [3, 256].
    for col_idx in categorical_features:
        n_unique_values = categorical_features[col_idx]
        transformers.append(
            (
                colnames[col_idx],
                KBinsDiscretizer(
                    n_bins=min(max(n_unique_values // 3, 3), 256),
                    encode='ordinal',
                    strategy=('quantile' if n_unique_values > 50 else 'kmeans')
                ),
                (col_idx,)
            )
        )
    # Binary columns: simple 0/1 thresholding.
    for col_idx in binary_features:
        transformers.append(
            (
                colnames[col_idx],
                Binarizer(threshold=0.0),
                (col_idx,)
            )
        )
    preprocessor = Pipeline(steps=[
        (
            'imputer', SimpleImputer(
                missing_values=np.nan, strategy='constant',
                fill_value=-1.0
            )
        ),
        (
            'minmax_scaler',
            MinMaxScaler()
        ),
        (
            'composite_transformer', ColumnTransformer(
                transformers=transformers,
                sparse_threshold=0.0,
                n_jobs=1
            )
        ),
        (
            'selector',
            VarianceThreshold()
        ),
        (
            'standard_scaler',
            StandardScaler(with_mean=True, with_std=True)
        ),
        (
            'pca',
            PCA(random_state=42)
        )
    ])
    return preprocessor.fit(X)
# %%
def reduce_dimensions_of_data(features: np.ndarray) -> np.ndarray:
    """Project features to a low-dimensional embedding with UMAP.

    Features are first standardized and reduced to one third of their
    original dimensionality with PCA, then embedded with a seeded UMAP
    (random_state=42). Intended for visualization.
    """
    preprocessed_features = Pipeline(
        steps=[
            ('scaler', StandardScaler()),
            ('pca', PCA(n_components=features.shape[1] // 3,
                        random_state=42))
        ]
    ).fit_transform(features)
    print('Features are preprocessed.')
    reduced_features = umap.UMAP(
        low_memory=False,
        n_jobs=-1,
        random_state=42,
        verbose=True
    ).fit_transform(preprocessed_features)
    print('Feature space is reduced.')
    del preprocessed_features
    return reduced_features
# %%
def show_temperature(features: np.ndarray, targets: np.ndarray,
                     title: str = '', figure_id: int = 0):
    """Scatter-plot 2-D feature projections coloured by target temperature.

    Args:
        features: 2-D array of shape (n_samples, 2) of projected coordinates.
        targets: 1-D array of per-sample temperatures, aligned with features.
        title: optional text inserted into the plot title.
        figure_id: matplotlib figure number to draw into.

    Raises:
        ValueError: if the shapes are inconsistent with the above.
    """
    if features.shape[0] != targets.shape[0]:
        err_msg = f'Features do not correspond to targets! ' \
                  f'{features.shape[0]} != {targets.shape[0]}'
        raise ValueError(err_msg)
    if len(features.shape) != 2:
        err_msg = f'Features are wrong! Expected 2-D array, got ' \
                  f'{len(features.shape)}-D one.'
        raise ValueError(err_msg)
    if features.shape[1] != 2:
        err_msg = f'Features are wrong! Expected number of ' \
                  f'columns is 2, got {features.shape[1]}.'
        raise ValueError(err_msg)
    if len(targets.shape) != 1:
        err_msg = f'Targets are wrong! Expected 1-D array, got ' \
                  f'{len(targets.shape)}-D one.'
        raise ValueError(err_msg)
    # Clip colours to the [1st, 99th] percentile band so a few outliers do
    # not wash out the colour map.
    sorted_targets = sorted(targets.tolist())
    n_percentile2 = max(int(round(0.01 * len(sorted_targets))), 1)
    min_target = sorted_targets[n_percentile2]
    max_target = sorted_targets[-n_percentile2]
    del sorted_targets
    # IMPROVED: vectorized np.clip replaces the original per-element loop
    # (same result, float64 as before).
    clipped_targets = np.clip(targets.astype(np.float64), min_target, max_target)
    temperature_colors = clipped_targets.tolist()
    temperature_norm = Normalize(vmin=np.min(temperature_colors),
                                 vmax=np.max(temperature_colors))
    plt.figure(figure_id, figsize=(11, 11))
    plt.scatter(x=features[:, 0], y=features[:, 1],
                marker='o', cmap=plt.cm.get_cmap("jet"), c=temperature_colors,
                norm=temperature_norm)
    if len(title) > 0:
        plt.title(f'UMAP projections of weather data {title} (temperature)')
    else:
        # IMPROVED: dropped the pointless f-prefix (no placeholders).
        plt.title('UMAP projections of weather data (temperature)')
    plt.colorbar()
    plt.show()
# %%
def filter_dataset(y: np.ndarray) -> List[int]:
    """Return indices whose values lie strictly inside the (0.1%, 99.9%) band of y.

    Raises:
        ValueError: if y contains 10000 samples or fewer.
    """
    ordered = sorted(y.tolist())
    n = len(ordered)
    if n <= 10000:
        err_msg = f'y is wrong! Expected length of y is greater than 10000, ' \
                  f'but got {n}.'
        raise ValueError(err_msg)
    lower_bound = ordered[int(round((n - 1) * 0.001))]
    upper_bound = ordered[int(round((n - 1) * 0.999))]
    # Keep only samples strictly between the two quantile bounds.
    return [idx for idx in range(n) if lower_bound < y[idx] < upper_bound]
# %%
def build_neural_network(input_size: int, layer_size: int, n_layers: int,
                         dropout_rate: float, scale_coeff: float,
                         nn_name: str) -> tf.keras.Model:
    """Build and compile a self-normalizing (SELU) probabilistic regressor.

    The network stacks `n_layers` Dense-SELU blocks with AlphaDropout between
    them. After the first 2/3 of the layers a 50-unit linear projection head
    branches off (trained with the N-pairs contrastive loss); the main trunk
    ends in a 2-unit layer parameterizing a Gaussian via DistributionLambda
    (trained with the negative log-likelihood). Optimized with
    Lookahead(RectifiedAdam).

    Args:
        input_size: dimensionality of the input feature vector.
        layer_size: number of units in every hidden dense layer.
        n_layers: total number of hidden dense layers.
        dropout_rate: AlphaDropout rate used throughout.
        scale_coeff: softening coefficient for the predicted std-dev head.
        nn_name: prefix for all layer names (must be unique per model).

    Returns:
        A compiled tf.keras.Model with outputs
        [distribution head, projection head].
    """
    def _lecun_init():
        # IMPROVED: the original duplicated this try/except five times inline.
        # Newer TF exposes LecunNormal under tf.keras.initializers; older
        # versions only provide the tf.compat.v1 spelling, so the attribute
        # lookup raises AttributeError there (narrowed from a bare except).
        try:
            return tf.keras.initializers.LecunNormal(
                seed=random.randint(0, 2147483647)
            )
        except AttributeError:
            return tf.compat.v1.keras.initializers.lecun_normal(
                seed=random.randint(0, 2147483647)
            )

    feature_vector = tf.keras.layers.Input(
        shape=(input_size,), dtype=tf.float32,
        name=f'{nn_name}_feature_vector'
    )
    hidden_layer = tf.keras.layers.AlphaDropout(
        rate=dropout_rate,
        seed=random.randint(0, 2147483647),
        name=f'{nn_name}_dropout1'
    )(feature_vector)
    # First 2/3 of the hidden layers (shared trunk before the projection).
    for layer_idx in range(1, (2 * n_layers) // 3 + 1):
        hidden_layer = tf.keras.layers.Dense(
            units=layer_size,
            activation='selu',
            kernel_initializer=_lecun_init(),
            bias_initializer='zeros',
            name=f'{nn_name}_dense{layer_idx}'
        )(hidden_layer)
        hidden_layer = tf.keras.layers.AlphaDropout(
            rate=dropout_rate,
            seed=random.randint(0, 2147483647),
            name=f'{nn_name}_dropout{layer_idx + 1}'
        )(hidden_layer)
    # Contrastive projection head branching off the shared trunk.
    projection_layer = tf.keras.layers.Dense(
        units=50,
        activation=None,
        use_bias=False,
        kernel_initializer=_lecun_init(),
        name=f'{nn_name}_projection'
    )(hidden_layer)
    # Remaining hidden layers of the regression trunk.
    for layer_idx in range((2 * n_layers) // 3 + 1, n_layers + 1):
        hidden_layer = tf.keras.layers.Dense(
            units=layer_size,
            activation='selu',
            kernel_initializer=_lecun_init(),
            bias_initializer='zeros',
            name=f'{nn_name}_dense{layer_idx}'
        )(hidden_layer)
        hidden_layer = tf.keras.layers.AlphaDropout(
            rate=dropout_rate,
            seed=random.randint(0, 2147483647),
            name=f'{nn_name}_dropout{layer_idx + 1}'
        )(hidden_layer)
    # Two units: Gaussian location and (pre-softplus) scale.
    output_layer = tf.keras.layers.Dense(
        units=2,
        activation=None,
        use_bias=False,
        kernel_initializer=_lecun_init(),
        name=f'{nn_name}_output'
    )(hidden_layer)
    bayesian_layer = tfp.layers.DistributionLambda(
        lambda t: tfp.distributions.Normal(
            loc=t[..., :1],
            scale=1e-6 + tf.math.softplus((1.0 / scale_coeff) * t[..., 1:])
        ),
        name=f'{nn_name}_distribution'
    )(output_layer)
    neural_network = tf.keras.Model(
        inputs=feature_vector,
        outputs=[bayesian_layer, projection_layer],
        name=nn_name
    )
    # Negative log-likelihood of the target under the predicted Gaussian.
    negloglik = lambda y, rv_y: -rv_y.log_prob(y)
    radam = tfa.optimizers.RectifiedAdam(learning_rate=3e-4)
    ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
    losses = {
        f'{nn_name}_distribution': negloglik,
        f'{nn_name}_projection': NPairsLoss()
    }
    loss_weights = {
        f'{nn_name}_distribution': 1.0,
        f'{nn_name}_projection': 0.5
    }
    metrics = {
        f'{nn_name}_distribution': [
            tf.keras.metrics.MeanAbsoluteError()
        ]
    }
    neural_network.compile(
        optimizer=ranger,
        loss=losses,
        loss_weights=loss_weights,
        metrics=metrics
    )
    return neural_network
# %%
def show_training_process(history: tf.keras.callbacks.History, metric_name: str,
                          figure_id: int = 1, comment: str = ''):
    """Plot the training (and, if present, validation) curve of one metric.

    Args:
        history: Keras History object returned by `Model.fit`.
        metric_name: key into `history.history` (e.g. 'loss').
        figure_id: matplotlib figure number to draw into.
        comment: optional suffix appended to the plot title.

    Raises:
        ValueError: if `metric_name` is not present in the history.
    """
    val_metric_name = 'val_' + metric_name
    if metric_name not in history.history:
        err_msg = f'The metric "{metric_name}" is not found! Available metrics are: ' \
                  f'{list(history.history.keys())}.'
        raise ValueError(err_msg)
    plt.figure(figure_id, figsize=(5, 5))
    interesting_metric = history.history[metric_name]
    plt.plot(list(range(len(interesting_metric))), interesting_metric,
             label=f'Training {metric_name}')
    if val_metric_name in history.history:
        interesting_val_metric = history.history[val_metric_name]
        # Training and validation curves must cover the same epochs.
        assert len(interesting_metric) == len(interesting_val_metric)
        plt.plot(list(range(len(interesting_val_metric))),
                 interesting_val_metric,
                 label=f'Validation {metric_name}')
    plt.xlabel('Epochs')
    plt.ylabel(metric_name)
    if len(comment) > 0:
        plt.title(f'Training process of {comment}')
    else:
        plt.title('Training process')
    plt.legend(loc='best')
    plt.show()
# %%
def predict_with_single_nn(input_data: np.ndarray, model_for_prediction: tf.keras.Model,
                           batch_size: int, output_scaler: StandardScaler) \
        -> Tuple[np.ndarray, np.ndarray]:
    """Predict per-sample mean and variance with one probabilistic network.

    The model is applied batch-wise; its first output must be a
    tfp.distributions.Distribution head. Means are mapped back to the
    original target scale with `output_scaler`; standard deviations are
    rescaled by the scaler's scale factor and returned squared (variance).

    Args:
        input_data: 2-D array (samples x features) of preprocessed inputs.
        model_for_prediction: compiled model whose first output is a
            distribution head.
        batch_size: inference batch size.
        output_scaler: the fitted scaler that was applied to the target.

    Returns:
        (means, variances): two 1-D arrays in the original target units.

    Raises:
        ValueError: if `input_data` is not 2-D or the model output is not a
            distribution.
    """
    if len(input_data.shape) != 2:
        err_msg = f'The `input_data` argument is wrong! Expected 2-D array, ' \
                  f'got {len(input_data.shape)}-D one!'
        raise ValueError(err_msg)
    n_batches = int(np.ceil(input_data.shape[0] / float(batch_size)))
    pred_mean = []
    pred_std = []
    for batch_idx in range(n_batches):
        batch_start = batch_idx * batch_size
        batch_end = min(input_data.shape[0], batch_start + batch_size)
        # Index [0] selects the distribution head of the two-output model.
        instant_predictions = model_for_prediction(input_data[batch_start:batch_end])[0]
        if not isinstance(instant_predictions, tfp.distributions.Distribution):
            err_msg = f'Minibatch {batch_idx}: predictions are wrong! ' \
                      f'Expected tfp.distributions.Distribution, ' \
                      f'got {type(instant_predictions)}.'
            raise ValueError(err_msg)
        instant_mean = instant_predictions.mean()
        instant_std = instant_predictions.stddev()
        del instant_predictions
        if not isinstance(instant_mean, np.ndarray):
            instant_mean = instant_mean.numpy()
        if not isinstance(instant_std, np.ndarray):
            instant_std = instant_std.numpy()
        instant_mean = instant_mean.astype(np.float64).flatten()
        instant_std = instant_std.astype(np.float64).flatten()
        pred_mean.append(instant_mean)
        pred_std.append(instant_std)
        del instant_mean, instant_std
    pred_mean = np.concatenate(pred_mean)
    pred_std = np.concatenate(pred_std)
    # Undo target standardization: means via inverse_transform, std-devs by
    # multiplying with the scaler's scale factor.
    pred_mean = output_scaler.inverse_transform(
        pred_mean.reshape((input_data.shape[0], 1))
    ).flatten()
    pred_std *= output_scaler.scale_[0]
    return pred_mean, pred_std * pred_std
# %%
def evaluate_single_nn(pred_means: np.ndarray, pred_vars: np.ndarray,
                       true_outputs: np.ndarray) -> float:
    """Score one model by the mean of its error-rejection curve.

    Samples are ranked by total-variance ('tvar') uncertainty and the mean of
    the squared-error rejection curve is returned (lower is better).

    Args:
        pred_means: 1-D array of predicted means.
        pred_vars: 1-D array of predicted variances.
        true_outputs: 1-D array of ground-truth targets (>= 5 samples).

    Returns:
        Mean of the rejection-MSE curve as a float.

    Raises:
        ValueError: if any array is not 1-D, shapes disagree, or there are
            fewer than 5 test samples.
    """
    if len(pred_means.shape) != 1:
        err_msg = f'The `pred_means` argument is wrong! Expected 1-D array, ' \
                  f'got {len(pred_means.shape)}-D one.'
        raise ValueError(err_msg)
    if len(pred_vars.shape) != 1:
        err_msg = f'The `pred_vars` argument is wrong! Expected 1-D array, ' \
                  f'got {len(pred_vars.shape)}-D one.'
        raise ValueError(err_msg)
    if len(true_outputs.shape) != 1:
        err_msg = f'The `true_outputs` argument is wrong! Expected 1-D array, ' \
                  f'got {len(true_outputs.shape)}-D one.'
        raise ValueError(err_msg)
    n_test_samples = true_outputs.shape[0]
    if n_test_samples < 5:
        raise ValueError(f'Number of test samples = {n_test_samples} is too small!')
    if n_test_samples != pred_means.shape[0]:
        err_msg = f'The `pred_means` does not correspond to the `true_outputs`! ' \
                  f'{pred_means.shape[0]} != {n_test_samples}'
        raise ValueError(err_msg)
    if n_test_samples != pred_vars.shape[0]:
        err_msg = f'The `pred_vars` does not correspond to the `true_outputs`! ' \
                  f'{pred_vars.shape[0]} != {n_test_samples}'
        raise ValueError(err_msg)
    # Shape (1, n, 2): a single-member "ensemble" of (mean, var) predictions,
    # as expected by ensemble_uncertainties_regression.
    all_preds_ = np.empty((1, n_test_samples, 2), dtype=np.float32)
    all_preds_[0, :, 0] = pred_means
    all_preds_[0, :, 1] = pred_vars
    all_uncertainty_ = ensemble_uncertainties_regression(all_preds_)
    uncertainties = all_uncertainty_['tvar']
    del all_preds_, all_uncertainty_
    errors = (pred_means - true_outputs) ** 2
    rejection_mse_ = calc_uncertainty_regection_curve(errors, uncertainties)
    return np.mean(rejection_mse_)
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from logger import setup_logger
from models.model_stages import BiSeNet
from cityscapes import CityScapes
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.distributed as dist
import os
import os.path as osp
import logging
import time
import numpy as np
from tqdm import tqdm
import math
# mIoU computation
class MscEvalV0(object):
    """Single-scale mean-IoU evaluator for a segmentation network."""
    def __init__(self, scale=0.5, ignore_label=255):
        # scale: input down-scaling factor; ignore_label: excluded from scoring
        self.ignore_label = ignore_label
        self.scale = scale
    # net: the network, dl: the dataloader, n_classes: number of classes
    def __call__(self, net, dl, n_classes):
        """Run `net` over `dl` at `self.scale` and return the mean IoU."""
        ## evaluate
        # confusion histogram (rows: ground-truth labels, cols: predictions)
        hist = torch.zeros(n_classes, n_classes).cuda().detach()
        if dist.is_initialized() and dist.get_rank() != 0:
            # only rank 0 shows a progress bar in distributed runs
            diter = enumerate(dl)
        else:
            diter = enumerate(tqdm(dl))
        for i, (imgs, label) in diter:
            # labels arrive with an extra singleton channel dimension
            N, _, H, W = label.shape
            label = label.squeeze(1).cuda()
            # squeeze that dimension away and move to GPU;
            # logits are later resized to the label's spatial size
            size = label.size()[-2:]
            imgs = imgs.cuda()
            N, C, H, W = imgs.size()
            new_hw = [int(H * self.scale), int(W * self.scale)]
            imgs = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True)
            logits = net(imgs)[0]
            logits = F.interpolate(logits, size=size,
                                   mode='bilinear', align_corners=True)
            probs = torch.softmax(logits, dim=1)
            preds = torch.argmax(probs, dim=1)
            keep = label != self.ignore_label
            hist += torch.bincount(
                label[keep] * n_classes + preds[keep],
                minlength=n_classes ** 2
            ).view(n_classes, n_classes).float()
        if dist.is_initialized():
            # aggregate the histograms from all workers
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        miou = ious.mean()
        return miou.item()
# Validation entry point; dspth is the dataset root directory.
def evaluatev0(respth='./pretrained', dspth='./data', backbone='CatNetSmall', scale=0.75, use_boundary_2=False,
               use_boundary_4=False, use_boundary_8=False, use_boundary_16=False, use_conv_last=False):
    """Evaluate a BiSeNet checkpoint on the CityScapes validation split, logging mIoU.

    Args:
        respth: path of the saved state_dict to load.
        dspth: CityScapes dataset root directory.
        backbone: backbone identifier forwarded to BiSeNet.
        scale: input down-scaling factor used during evaluation.
        use_boundary_2/4/8/16: boundary-supervision flags forwarded to BiSeNet.
        use_conv_last: forwarded to BiSeNet.
    """
    print('scale', scale)
    print('use_boundary_2', use_boundary_2)
    print('use_boundary_4', use_boundary_4)
    print('use_boundary_8', use_boundary_8)
    print('use_boundary_16', use_boundary_16)
    ## dataset
    batchsize = 5
    n_workers = 2
    dsval = CityScapes(dspth, mode='val')
    dl = DataLoader(dsval,
                    batch_size=batchsize,
                    shuffle=False,
                    num_workers=n_workers,
                    drop_last=False)
    n_classes = 19  # CityScapes has 19 evaluation classes
    print("backbone:", backbone)
    net = BiSeNet(backbone=backbone, n_classes=n_classes,
                  use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4,
                  use_boundary_8=use_boundary_8, use_boundary_16=use_boundary_16,
                  use_conv_last=use_conv_last)
    net.load_state_dict(torch.load(respth))
    net.cuda()
    net.eval()
    with torch.no_grad():
        single_scale = MscEvalV0(scale=scale)
        # CONSISTENCY FIX: pass the n_classes variable instead of repeating
        # the literal 19, so a single change keeps model and metric in sync.
        mIOU = single_scale(net, dl, n_classes)
    logger = logging.getLogger()
    logger.info('mIOU is: %s\n', mIOU)
class MscEval(object):
    def __init__(self,
                 model,
                 dataloader,
                 scales=[0.5, 0.75, 1, 1.25, 1.5, 1.75],
                 n_classes=19,
                 lb_ignore=255,
                 cropsize=1024,
                 flip=True,
                 *args, **kwargs):
        """Multi-scale sliding-window evaluator.

        Args:
            model: segmentation network to evaluate.
            dataloader: yields (images, labels) batches.
            scales: input scales whose predictions are accumulated.
                NOTE(review): mutable default list — harmless as long as no
                caller mutates it, but worth confirming.
            n_classes: number of semantic classes.
            lb_ignore: label value excluded from scoring.
            cropsize: sliding-window crop size in pixels.
            flip: if True, average predictions with horizontal flips.
        """
        self.scales = scales
        self.n_classes = n_classes
        self.lb_ignore = lb_ignore
        self.flip = flip
        self.cropsize = cropsize
        ## dataloader
        self.dl = dataloader
        self.net = model
    def pad_tensor(self, inten, size):
        """Zero-pad an (N, C, H, W) tensor to `size`, centering the content.

        Returns the padded tensor and [hst, hed, wst, wed], the slice of the
        padded tensor that holds the original content (used to crop back).
        """
        N, C, H, W = inten.size()
        outten = torch.zeros(N, C, size[0], size[1]).cuda()
        outten.requires_grad = False
        margin_h, margin_w = size[0] - H, size[1] - W
        hst, hed = margin_h // 2, margin_h // 2 + H
        wst, wed = margin_w // 2, margin_w // 2 + W
        outten[:, :, hst:hed, wst:wed] = inten
        return outten, [hst, hed, wst, wed]
    def eval_chip(self, crop):
        """Forward one crop, optionally averaging with its horizontal flip.

        NOTE(review): torch.exp is applied to the (possibly summed) softmax
        probabilities. exp is monotonic, so a downstream argmax is unchanged,
        but confirm the exponentiation is intentional before using the raw
        values as probabilities.
        """
        with torch.no_grad():
            out = self.net(crop)[0]
            prob = F.softmax(out, 1)
            if self.flip:
                # add the prediction of the horizontally flipped input
                crop = torch.flip(crop, dims=(3,))
                out = self.net(crop)[0]
                out = torch.flip(out, dims=(3,))
                prob += F.softmax(out, 1)
            prob = torch.exp(prob)
        return prob
    def crop_eval(self, im):
        """Evaluate one image by sliding fixed-size crops over it.

        Images smaller than `cropsize` are zero-padded and evaluated in a
        single crop; larger images are covered by overlapping windows
        (stride = 5/6 of the crop size) whose probability maps are summed
        back into a full-resolution map, which is returned.
        """
        cropsize = self.cropsize
        stride_rate = 5 / 6.
        N, C, H, W = im.size()
        long_size, short_size = (H, W) if H > W else (W, H)
        if long_size < cropsize:
            # whole image fits into a single (padded) crop
            im, indices = self.pad_tensor(im, (cropsize, cropsize))
            prob = self.eval_chip(im)
            prob = prob[:, :, indices[0]:indices[1], indices[2]:indices[3]]
        else:
            stride = math.ceil(cropsize * stride_rate)
            if short_size < cropsize:
                # pad only the short side up to the crop size
                if H < W:
                    im, indices = self.pad_tensor(im, (cropsize, W))
                else:
                    im, indices = self.pad_tensor(im, (H, cropsize))
            N, C, H, W = im.size()
            n_x = math.ceil((W - cropsize) / stride) + 1
            n_y = math.ceil((H - cropsize) / stride) + 1
            prob = torch.zeros(N, self.n_classes, H, W).cuda()
            prob.requires_grad = False
            for iy in range(n_y):
                for ix in range(n_x):
                    # clamp each window to the image border
                    hed, wed = min(H, stride * iy + cropsize), min(W, stride * ix + cropsize)
                    hst, wst = hed - cropsize, wed - cropsize
                    chip = im[:, :, hst:hed, wst:wed]
                    prob_chip = self.eval_chip(chip)
                    prob[:, :, hst:hed, wst:wed] += prob_chip
            if short_size < cropsize:
                # drop the padding added above
                prob = prob[:, :, indices[0]:indices[1], indices[2]:indices[3]]
        return prob
    def scale_crop_eval(self, im, scale):
        """Crop-evaluate `im` at the given scale, then resize probabilities back."""
        N, C, H, W = im.size()
        new_hw = [int(H * scale), int(W * scale)]
        im = F.interpolate(im, new_hw, mode='bilinear', align_corners=True)
        prob = self.crop_eval(im)
        prob = F.interpolate(prob, (H, W), mode='bilinear', align_corners=True)
        return prob
def compute_hist(self, pred, lb):
n_classes = self.n_classes
ignore_idx = self.lb_ignore
keep = np.logical_not(lb == ignore_idx)
merge = pred[keep] * n_classes + lb[keep]
hist = np.bincount(merge, minlength=n_classes ** 2)
hist = hist.reshape((n_classes, n_classes))
return hist
def evaluate(self):
## evaluate
n_classes = self.n_classes
hist = np.zeros((n_classes, n_classes), dtype=np.float32)
dloader = tqdm(self.dl)
if dist.is_initialized() and not dist.get_rank() == 0:
dloader = self.dl
for i, (imgs, label) in enumerate(dloader):
N, _, H, W = label.shape
probs = torch.zeros((N, self.n_classes, H, W))
probs.requires_grad = False
imgs = imgs.cuda()
for sc in self.scales:
# prob = self.scale_crop_eval(imgs, sc)
prob = self.eval_chip(imgs)
probs += prob.detach().cpu()
probs = probs.data.numpy()
preds = np.argmax(probs, axis=1)
hist_once = self.compute_hist(preds, label.data.numpy().squeeze(1))
hist = hist + hist_once
IOUs = np.diag(hist) / ( | np.sum(hist, axis=0) | numpy.sum |
import json
import os
import sys
import math
import glob
import numpy as np
import random
import csv
import subprocess
import time
#Before using, open Dream3D and set the folder that you want the output files in.
#Functions here used to change the pipeline only affect the name of the output file
#not the directory.
#Type directory containing json files for Dream3D Pipelines
pipelineDirectory = '/home/jackyl/Desktop/Dream3DPyTest/Pipes/FilterPipelines'
#PipelineRunnerDirectory
# Currently this requires that the PipelineRunner file be placed in the Plugins
# directory of the DREAM3D files.
pipeRunnerDirectory = '/home/jackyl/Desktop/Dream3DPyTest/Dream3D-6.3.29/Plugins'
#Path to output directory
outputDirectory = '/home/jackyl/Desktop/Dream3DPyTest/VolFrac'
################################################
#Housekeeping - Managing files
################################################
def openPipeline(filePath):
    """Load a DREAM.3D pipeline JSON file and return it as a dict."""
    with open(filePath, 'r') as fp:
        return json.load(fp)
def updatePipeline(pipeData, filePath):
    """Serialize pipeData as JSON, overwriting the file at filePath."""
    serialized = json.dumps(pipeData)
    with open(filePath, "w") as fp:
        fp.write(serialized)
def runPipelineRunner(pipeline):
    """Execute one DREAM.3D pipeline JSON synchronously via PipelineRunner.

    Blocks until the external PipelineRunner process finishes. `pipeline` is
    the path of the pipeline JSON passed with the -p flag.
    """
    # Changed Working Directory to where my pipelinerunner command was
    # This may not be necessary on your machine, check with PipelineRunner Docs for Dream3D
    # and adjust cwd as necessary
    # Runs PipelineRunner in Terminal - subprocess should not continue unless previous is done.
    subprocess.call(['./PipelineRunner', '-p', pipeline],
                    cwd =pipeRunnerDirectory)
    #
    # This is also valid, and allows starting several DREAM3D processes, but does not stop
    # even if it uses all the RAM available and crashes
    # USE AS YOUR OWN RISK (Add a time.sleep call to the trial function)
    # subprocess.Popen(['./PipelineRunner', '-p', pipeline],
    #                  cwd=pipeRunnerDirectory)
################################################
# JSON Editing Functions
################################################
def changeMuAndSD(pipeData, newMu, newSD, phase=1, cutoff=4):
    """Set the lognormal feature-size parameters (mu, sigma) for a phase.

    Also refreshes the max/min feature diameters at +/- `cutoff` standard
    deviations (entries 1 and 2 of Feature_Diameter_Info).
    """
    for key in pipeData:
        if key != "PipelineBuilder" and int(key) == 0:
            section = key
    phase_stats = pipeData[section]['StatsDataArray'][str(phase)]
    phase_stats['FeatureSize Distribution']['Average'] = newMu
    phase_stats['FeatureSize Distribution']['Standard Deviation'] = newSD
    phase_stats['Feature_Diameter_Info'][1] = math.exp(newMu + newSD * cutoff)
    phase_stats['Feature_Diameter_Info'][2] = math.exp(newMu - newSD * cutoff)
def changePhaseFraction(pipeData, fraction, phase=1):
    """Set the volume fraction of the given phase in the stats section."""
    for key in pipeData:
        if key != "PipelineBuilder" and int(key) == 0:
            section = key
    pipeData[section]['StatsDataArray'][str(phase)]['PhaseFraction'] = fraction
def changeDimensions(pipeData, inputX, inputY, inputZ):
    """Set the synthetic volume size (voxel counts per axis) in section '01'."""
    dims = pipeData['01']['Dimensions']
    dims['x'] = inputX
    dims['y'] = inputY
    dims['z'] = inputZ
def changeResolution(pipeData, inputX, inputY, inputZ):
    """Set the voxel resolution per axis in section '01'."""
    res = pipeData['01']['Resolution']
    res['x'] = inputX
    res['y'] = inputY
    res['z'] = inputZ
def changeShapeDist(pipeData, alpha1, beta1, alpha2, beta2, phase=1):
    """Set the feature-shape (aspect-ratio) beta distributions for a phase.

    alpha1/beta1 parameterize B/A, alpha2/beta2 parameterize C/A; each value
    is replicated across the six size bins.
    """
    for key in pipeData:
        if key != "PipelineBuilder" and int(key) == 0:
            section = key
    phase_stats = pipeData[section]['StatsDataArray'][str(phase)]
    phase_stats['FeatureSize Vs B Over A Distributions']['Alpha'] = [alpha1] * 6
    phase_stats['FeatureSize Vs B Over A Distributions']['Beta'] = [beta1] * 6
    phase_stats['FeatureSize Vs C Over A Distributions']['Alpha'] = [alpha2] * 6
    phase_stats['FeatureSize Vs C Over A Distributions']['Beta'] = [beta2] * 6
def changeOutputFileName(pipeData, typeOfFile, newFileName, outputDir=outputDirectory):
    """Point one of the pipeline's writer filters at a new output file name.

    NOTE: only the file name is changed, never the containing directories,
    so `newFileName` must not contain '/'.

    Args:
        pipeData: parsed pipeline JSON (mutated in place).
        typeOfFile: one of 'csv', 'dream3d', 'polefig', 'FFT' — selects which
            writer filter to retarget.
        newFileName: the new file name (or image prefix for 'polefig').
        outputDir: target directory; if None, only the file-name component of
            the existing path is replaced ('csv'/'dream3d' only).

    Raises:
        ValueError: if `typeOfFile` is not one of the supported values.
    """
    if typeOfFile == "csv":
        for part in pipeData:
            if (pipeData[part].get('Filter_Human_Label', 0) == 'Write Feature Data as CSV File'):
                section = part
                output = 'FeatureDataFile'
    elif typeOfFile == "dream3d":
        for part in pipeData:
            if (pipeData[part].get('Filter_Human_Label', 0) == 'Write DREAM.3D Data File'):
                section = part
                output = 'OutputFile'
    elif typeOfFile == 'polefig':
        for part in pipeData:
            if (pipeData[part].get('Filter_Human_Label', 0) == 'Write Pole Figure Images'):
                section = part
    elif typeOfFile == 'FFT':
        for part in pipeData:
            if (pipeData[part].get('Filter_Human_Label', 0) == "Write Los Alamos FFT File"):
                section = part
    else:
        # ROBUSTNESS FIX: the original silently fell through here and later
        # died with an UnboundLocalError on `section`; fail fast instead.
        raise ValueError(f'Unsupported typeOfFile: {typeOfFile!r}')
    if (outputDir is not None and typeOfFile != 'polefig' and typeOfFile != 'FFT'):
        pipeData[section][output] = outputDir + "/" + newFileName
    elif typeOfFile == 'polefig':
        pipeData[section]['OutputPath'] = outputDir
        pipeData[section]['ImagePrefix'] = newFileName
    elif typeOfFile == 'FFT':
        pipeData[section]['OutputFile'] = outputDir + "/" + newFileName
        pipeData[section]['FeatureIdsArrayPath']['OutputFile'] = outputDir + "/" + newFileName
    else:
        # outputDir is None: swap only the last path component in place.
        curName = pipeData[section][output]
        partList = curName.split("/")
        partList[-1] = newFileName
        newName = '/'.join(partList)
        pipeData[section][output] = newName
def changeODF(pipeData, e1, e2, e3, wt, sigma, phase=1):
    """Set (or clear) the ODF texture weights for a phase.

    Euler angles e1/e2/e3 are given in degrees (scalars or lists) and stored
    in radians; passing an empty list for any argument clears the
    ODF-Weights entry entirely.
    """
    e1 = [e1] if type(e1) != list else e1
    e2 = [e2] if type(e2) != list else e2
    e3 = [e3] if type(e3) != list else e3
    wt = [wt] if type(wt) != list else wt
    sigma = [sigma] if type(sigma) != list else sigma
    e1 = [math.radians(angle) for angle in e1]
    e2 = [math.radians(angle) for angle in e2]
    e3 = [math.radians(angle) for angle in e3]
    phase_stats = pipeData['00']['StatsDataArray'][str(phase)]
    if e1 == [] or e2 == [] or e3 == [] or wt == [] or sigma == []:
        phase_stats['ODF-Weights'] = {}
    else:
        weights = phase_stats['ODF-Weights']
        weights['Weight'] = wt
        weights['Sigma'] = sigma
        weights['Euler 1'] = e1
        weights['Euler 2'] = e2
        weights['Euler 3'] = e3
################################################
# Texture Helper Functions
################################################
def eulerAnglesToMatrix(eulerAngle):
    """Build the rotation matrix Z(phi2) * X(Phi) * Z(phi1) for Bunge Euler angles.

    NOTE(review): the original comment claimed the angles are in degrees, but
    they are fed to math.cos/math.sin directly, i.e. interpreted as radians.
    """
    phi1, Phi, phi2 = eulerAngle[0], eulerAngle[1], eulerAngle[2]

    def _rot_z(angle):
        # Rotation about the Z axis by `angle` (radians).
        return np.matrix([[math.cos(angle), math.sin(angle), 0],
                          [-math.sin(angle), math.cos(angle), 0],
                          [0, 0, 1]])

    rot_x = np.matrix([[1, 0, 0],
                       [0, math.cos(Phi), math.sin(Phi)],
                       [0, -math.sin(Phi), math.cos(Phi)]])
    return _rot_z(phi2) * rot_x * _rot_z(phi1)
def matrixToEuler(g):
    """Recover Bunge Euler angles (in degrees) from a rotation matrix.

    In the degenerate case g[2,2] == 1 (Phi == 0) only phi1 + phi2 is
    determined; the sum is split evenly between the two angles.
    Returns a 1x3 np.matrix of angles in degrees.
    """
    if g[2, 2] == 1:
        Phi = 0
        phi1 = np.arctan2(g[0, 1], g[0, 0]) / 2
        phi2 = phi1
    else:
        Phi = math.acos(g[2, 2])
        sin_phi = math.sin(Phi)
        phi1 = np.arctan2(g[2, 0] / sin_phi, -g[2, 1] / sin_phi)
        phi2 = np.arctan2(g[0, 2] / sin_phi, g[1, 2] / sin_phi)
    return np.degrees(np.matrix([phi1, Phi, phi2]))
def millerIndexToMatrix(b, n):
#Requires b and n to be np.matrix types
bnorm = b / np.linalg.norm(b)
nnorm = n / np.linalg.norm(n)
t = np.cross(nnorm, bnorm)
tnorm = t / | np.linalg.norm(t) | numpy.linalg.norm |
import sys
import os
from RGBDPose.utils import ply_loader
import numpy as np
import json
import open3d
import copy
def load_pcd(cat):
    """Load the PLY mesh at path `cat` and return its vertex ('pts') array."""
    # load meshes
    model_vsd = ply_loader.load_ply(cat)
    return model_vsd['pts']
def main(argv):
root = argv[0]
samples = argv[1]
meshes = os.listdir(argv[0])
meshes = [k for k in meshes if k.endswith('.ply')]
mesh_dict = dict()
for mesh_name in meshes:
if mesh_name[-3:] == 'ply':
path = root + '/' + mesh_name
pts = load_pcd(path)
colors = np.zeros(pts.shape)
pcd_model = open3d.PointCloud()
pcd_model.points = open3d.Vector3dVector(pts)
pcd_model.colors = open3d.Vector3dVector(colors)
draw_models = []
draw_models.append(pcd_model)
print(mesh_name)
control_points = np.zeros((1, 3), dtype=np.float32)
# choose starting point
norms = np.linalg.norm(pts, 2, 1)
first_k = np.argmax(norms)
control_points[0, :] = pts[first_k, :]
mesh_sphere = open3d.create_mesh_coordinate_frame(size=0.01,
origin=pts[first_k, :]) # geometry.TriangleMesh.
mesh_sphere.paint_uniform_color([0.1, 0.1, 0.7])
draw_models.append(mesh_sphere)
max_x = np.max(pts[:, 0])
min_x = np.min(pts[:, 0])
max_y = np.max(pts[:, 1])
min_y = np.min(pts[:, 1])
max_z = np.max(pts[:, 2])
min_z = np.min(pts[:, 2])
x_dim = (max_x - min_x) #* 0.5
y_dim = (max_y - min_y) #* 0.5
z_dim = (max_z - min_z) #* 0.5
#min_side_length = min([x_dim], [y_dim], [z_dim])
min_side_length = (x_dim + y_dim + z_dim)/7
print(min_side_length)
for k in range(int(samples)-1):
distances = []
for ind, q_p in enumerate(pts):
dist_sum = 0.0
skipped = False
for p_p in range(control_points.shape[0]):
dist_poi = np.linalg.norm((q_p - control_points[p_p, :]), 2)
dist_sum += dist_poi
if dist_poi < min_side_length:
skipped = True
if skipped == True: # hell of a bad workaround
distances.append(0.0)
else:
distances.append(dist_sum)
point_k = np.argmax(distances)
print(distances[point_k])
cp_now = np.zeros((1, 3), dtype=np.float32)
cp_now[0, :] = pts[point_k, :]
mesh_sphere = open3d.create_mesh_coordinate_frame(size=0.01, origin=pts[point_k, :]) # geometry.TriangleMesh.
mesh_sphere.paint_uniform_color([0.1, 0.1, 0.7])
#mesh_transform = np.ones((4, 4))
#mesh_transform[0, 3] = pts[point_k, 0]
#mesh_transform[1, 3] = pts[point_k, 1]
#mesh_transform[2, 3] = pts[point_k, 2]
#mesh_sphere.transform(mesh_transform)
#open3d.draw_geometries([mesh_sphere])
draw_models.append(mesh_sphere)
control_points = | np.concatenate([control_points, cp_now], axis=0) | numpy.concatenate |
"""
@ <NAME> (<EMAIL>), May 2021
@ <NAME> (<EMAIL>)
Demonstration of the extraction of training and evaluation sets based on
grain segmentation by the LRC-MRM method.
"""
import os
import numpy as np
from scipy import ndimage
from skimage.feature import peak_local_max
from lib.segmentation import run_lrc_mrm
import warnings
warnings.filterwarnings("ignore")
def run_test(ms=8, cps=20, ssize=5_000, debug=False, save_it=False):
"""
Runs the data extraction test.
Args:
ms: Exclusion distance to closest grain boundary (in px).
cps: Size of the NMF basis (pseudo-parameter of the grain segmentation algorithm).
ssize: Random sampling size (pseudo-parameter of the grain segmentation algorithm).
debug: if True, stops the process after 1 window instead of processing the whole dataset.
save_it: whether to save the extracted dataset or not. If True, creates a NPY file (extracted_set_test.npy)
containing a dataset of (1) DRM signals extracted from the specimen and (2) the corresponding EBSD orientations.
"""
# Specify root directory
root = os.path.realpath(__file__)
root = os.path.abspath(os.path.join(os.path.dirname(root), 'data/'))
# Open DRM dataset
data = np.load(f'{root}/drm_data.npy')
rx, ry, s0, s1 = data.shape
# Open EBSD euler map (labels for training / evaluation)
eulers = np.load(f'{root}/eulers.npy')
def data_window_generator():
"""
Yields a sliding window of (200 x 200) pixels across the dataset.
"""
window_size_x, window_size_y = (200, 200)
x_indeces = np.floor(rx // window_size_x)
residual_x = rx - window_size_x * x_indeces
y_indeces = | np.floor(ry // window_size_y) | numpy.floor |
import numpy as np
from math import ceil
from numpy import linalg as LA
import pandas as pd
from collections import defaultdict
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
from itertools import chain
from copy import deepcopy
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
class SensorArray:
    """Time-stamped multi-channel sensor samples.

    `times` is a 1-D array of ascending sample timestamps; `data` is a
    (channels, samples) array aligned with it column-wise.
    """
    times: np.ndarray = None
    data: np.ndarray = None

    def __init__(self, times, data):
        self.times = times
        self.data = data

    def get_time_index(self, t):
        """Return the index of the first sample whose timestamp is >= t."""
        return np.sum(self.times < t)

    def get_interval(self, t_start, t_end) -> 'SensorArray':
        """Return a new SensorArray restricted to the half-open window [t_start, t_end)."""
        start, stop = self.get_time_index(t_start), self.get_time_index(t_end)
        return SensorArray(self.times[start:stop], self.data[:, start:stop])

    def __deepcopy__(self, memodict={}):
        """Deep-copy by duplicating the underlying arrays."""
        return SensorArray(self.times.copy(), self.data.copy())
class Events:
    """A sequence of time-stamped event codes.

    Attributes:
        times: 1-D array of event timestamps.
        events: event code strings, one per timestamp.
    """

    times: np.ndarray = None
    events: List[str] = None

    def __init__(self, times, events):
        self.times = times
        self.events = events

    def parse_event_times(self, end_code) -> List[Tuple[str, float, float]]:
        """Pair each start code with the following ``end_code``.

        The stream must strictly alternate start and end codes.

        :return: list of (start_code, start_time, end_time) triples
        :raises ValueError: on an end code with no open trial, a missing
            end code between two starts, or a dangling final start code.
        """
        trials: List[Tuple[str, float, float]] = []
        pending = None  # (code, time) of the currently open trial, if any
        for idx in range(len(self.events)):
            code = self.events[idx]
            t = self.times[idx]
            if pending is None:
                if code == end_code:
                    raise ValueError(
                        f'Got end code before start code at index {idx}, '
                        f'time {t}')
                pending = (code, t)
            else:
                if code != end_code:
                    raise ValueError(
                        f'Expected end code at index {idx}, time {t}')
                trials.append((pending[0], pending[1], t))
                pending = None
        if pending is not None:
            raise ValueError('Last trial does not have end code')
        return trials
class Trial:
previous_trial: 'Trial' = None
trial_code: str = ''
experiment_data: List[SensorArray] = []
trial_start: float = 0
trial_end: float = 0
pre: List[SensorArray] = []
trial: List[SensorArray] = []
post: List[SensorArray] = []
    def __init__(
            self, experiment_data, trial_code, trial_start, trial_end,
            previous_trial=None):
        """Store trial metadata; call parse_trial() to slice the data.

        Args:
            experiment_data: list of SensorArray covering the full recording.
            trial_code: event code that started this trial.
            trial_start: trial start time (same time base as the sensors).
            trial_end: trial end time.
            previous_trial: optional preceding Trial, for chaining.
        """
        self.experiment_data = experiment_data
        self.trial_code = trial_code
        self.trial_start = trial_start
        self.trial_end = trial_end
        self.previous_trial = previous_trial
    def parse_trial(self, pre_trial_duration, post_trial_duration) -> None:
        """Slice every sensor into pre/trial/post windows around this trial.

        Populates self.pre, self.trial and self.post, each a list of
        SensorArray (one per sensor in experiment_data).

        Args:
            pre_trial_duration: length of the window kept before trial_start.
            post_trial_duration: length of the window kept after trial_end.
        """
        # Window immediately preceding the trial.
        self.pre = [
            sensor.get_interval(
                self.trial_start - pre_trial_duration, self.trial_start)
            for sensor in self.experiment_data
        ]
        # The trial itself.
        self.trial = [
            sensor.get_interval(self.trial_start, self.trial_end)
            for sensor in self.experiment_data
        ]
        # Window immediately following the trial.
        self.post = [
            sensor.get_interval(
                self.trial_end, self.trial_end + post_trial_duration)
            for sensor in self.experiment_data
        ]
    def __deepcopy__(self, memodict={}):
        """Return a copy with duplicated pre/trial/post sensor data.

        NOTE(review): experiment_data is shared (not copied) and
        previous_trial is dropped -- see the existing TODO below.
        """
        # todo handle previous trial copy
        trial = Trial(
            self.experiment_data, self.trial_code, self.trial_start,
            self.trial_end
        )
        trial.pre = deepcopy(self.pre)
        trial.trial = deepcopy(self.trial)
        trial.post = deepcopy(self.post)
        return trial
def get_section(self, section) -> List[np.ndarray]:
if section in ('pre', 'trial', 'post'):
return [s.data for s in getattr(self, section)]
elif section == 'all':
data = [[] for _ in self.trial]
for i, sensor in enumerate(self.pre):
data[i].append(sensor.data)
for i, sensor in enumerate(self.trial):
data[i].append(sensor.data)
for i, sensor in enumerate(self.post):
data[i].append(sensor.data)
return [ | np.concatenate(item, axis=1) | numpy.concatenate |
#
# Tests of spherical Bessel functions.
#
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_array_almost_equal)
import pytest
from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi
from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn
from scipy.integrate import quad
from scipy._lib._numpy_compat import suppress_warnings
class TestSphericalJn:
    """Tests for spherical_jn (spherical Bessel function of the first kind),
    checked against DLMF closed forms, recurrences and limits."""

    def test_spherical_jn_exact(self):
        # https://dlmf.nist.gov/10.49.E3
        # Note: exact expression is numerically stable only for small
        # n or z >> n.
        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        assert_allclose(spherical_jn(2, x),
                        (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x))

    def test_spherical_jn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x),
                        (2*n + 1)/x*spherical_jn(n, x))

    def test_spherical_jn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x),
                        (2*n + 1)/x*spherical_jn(n, x))

    def test_spherical_jn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        # j_n decays like 1/x, so the limit at +/-inf is 0.
        n = 6
        x = np.array([-inf, inf])
        assert_allclose(spherical_jn(n, x), np.array([0, 0]))

    def test_spherical_jn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)]))

    def test_spherical_jn_large_arg_1(self):
        # https://github.com/scipy/scipy/issues/2165
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747)

    def test_spherical_jn_large_arg_2(self):
        # https://github.com/scipy/scipy/issues/1641
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05)

    def test_spherical_jn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1
        # But note that n = 0 is a special case: j0 = sin(x)/x -> 1
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalYn:
    """Tests for spherical_yn (spherical Bessel function of the second kind),
    checked against DLMF closed forms, recurrences and limits."""

    def test_spherical_yn_exact(self):
        # https://dlmf.nist.gov/10.49.E5
        # Note: exact expression is numerically stable only for small
        # n or z >> n.
        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        assert_allclose(spherical_yn(2, x),
                        (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x))

    def test_spherical_yn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x),
                        (2*n + 1)/x*spherical_yn(n, x))

    def test_spherical_yn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
                        (2*n + 1)/x*spherical_yn(n, x))

    def test_spherical_yn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 6
        x = np.array([-inf, inf])
        assert_allclose(spherical_yn(n, x), np.array([0, 0]))

    def test_spherical_yn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)]))

    def test_spherical_yn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E2
        # y_n diverges to -inf as x -> 0+ for every order n.
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf))

    def test_spherical_yn_at_zero_complex(self):
        # Consistently with numpy:
        # >>> -np.cos(0)/0
        # -inf
        # >>> -np.cos(0+0j)/(0+0j)
        # (-inf + nan*j)
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0 + 0j
        assert_allclose(spherical_yn(n, x), np.full(n.shape, nan))
class TestSphericalJnYnCrossProduct:
    """Check the Wronskian-like cross products of j_n and y_n (DLMF 10.50.E3)."""

    def test_spherical_jn_yn_cross_product_1(self):
        # https://dlmf.nist.gov/10.50.E3
        orders = np.array([1, 5, 8])
        args = np.array([0.1, 1, 10])
        cross = (spherical_jn(orders + 1, args) * spherical_yn(orders, args)
                 - spherical_jn(orders, args) * spherical_yn(orders + 1, args))
        assert_allclose(cross, 1/args**2)

    def test_spherical_jn_yn_cross_product_2(self):
        # https://dlmf.nist.gov/10.50.E3
        orders = np.array([1, 5, 8])
        args = np.array([0.1, 1, 10])
        cross = (spherical_jn(orders + 2, args) * spherical_yn(orders, args)
                 - spherical_jn(orders, args) * spherical_yn(orders + 2, args))
        assert_allclose(cross, (2*orders + 3)/args**3)
class TestSphericalIn:
    """Tests for spherical_in (modified spherical Bessel function of the
    first kind), checked against DLMF closed forms, recurrences and limits."""

    def test_spherical_in_exact(self):
        # https://dlmf.nist.gov/10.49.E9
        x = np.array([0.12, 1.23, 12.34, 123.45])
        assert_allclose(spherical_in(2, x),
                        (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x))

    def test_spherical_in_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E4
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
                        (2*n + 1)/x*spherical_in(n, x))

    def test_spherical_in_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
                        (2*n + 1)/x*spherical_in(n, x))

    def test_spherical_in_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 5
        x = np.array([-inf, inf])
        assert_allclose(spherical_in(n, x), np.array([-inf, inf]))

    def test_spherical_in_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E5
        # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
        # this appears impossible to achieve because C99 regards any complex
        # value with at least one infinite part as a complex infinity, so
        # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is
        # the correct return value.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan]))

    def test_spherical_in_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1
        # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalKn:
    """Tests for spherical_kn (modified spherical Bessel function of the
    second kind), checked against DLMF closed forms, recurrences and limits."""

    def test_spherical_kn_exact(self):
        # https://dlmf.nist.gov/10.49.E13
        x = np.array([0.12, 1.23, 12.34, 123.45])
        assert_allclose(spherical_kn(2, x),
                        pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3))

    def test_spherical_kn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E4
        # The (-1)**n factors adapt the i_n recurrence to k_n.
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))

    def test_spherical_kn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E4
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))

    def test_spherical_kn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E6
        n = 5
        x = np.array([-inf, inf])
        assert_allclose(spherical_kn(n, x), np.array([-inf, 0]))

    def test_spherical_kn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E6
        # The behavior at complex infinity depends on the sign of the real
        # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's
        # z*inf. This distinction cannot be captured, so we return nan.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan]))
def test_spherical_kn_at_zero(self):
# https://dlmf.nist.gov/10.52.E2
n = | np.array([0, 1, 2, 5, 10, 100]) | numpy.array |
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
class IMBALANETINYIMGNET(torchvision.datasets.ImageFolder):
cls_num = 200
    def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0,
                 transform=None, target_transform=None):
        """Build an artificially class-imbalanced ImageFolder dataset.

        Args:
            root: ImageFolder root directory.
            imb_type: 'exp' for exponential decay of class sizes, 'step'
                for a two-level split; anything else keeps classes balanced.
            imb_factor: ratio of the smallest to the largest class size.
            rand_number: NumPy seed so the subsampling is reproducible.
            transform, target_transform: forwarded to ImageFolder.
        """
        super(IMBALANETINYIMGNET, self).__init__(root, transform, target_transform)
        # Seed NumPy so gen_imbalanced_data draws the same subset every run.
        np.random.seed(rand_number)
        img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
        self.gen_imbalanced_data(img_num_list)
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
img_max = len(self.samples) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max * imb_factor))
else:
img_num_per_cls.extend([int(img_max)] * cls_num)
return img_num_per_cls
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
targets_np = np.array(self.targets, dtype=np.int64)
classes = | np.unique(targets_np) | numpy.unique |
'''Interpretation of the Siemens GSL functions.
Author: <NAME> <<EMAIL>>
Copyright (C) 2020 University of Oxford '''
import numpy as np
def class_ori(sag_comp, cor_comp, tra_comp, debug):
    ''' Python implementation of IDEA-VB17/n4/pkg/MrServers/MrMeasSrv/SeqFW/libGSL/fGSLClassOri.cpp
    Function to determine whether a normal vector describes a sagittal, coronal or transverse slice.
    Result:
        CASE = 0: Sagittal
        CASE = 1: Coronal
        CASE = 2: Transverse
    :param sag_comp: Sagittal component of normal vector
    :param cor_comp: Coronal component of normal vector
    :param tra_comp: Transverse component of normal vector
    :param debug: if True, print the intermediate classification steps
    :return: case (0=Sagittal, 1=Coronal or 2=Transverse)
    '''
    if debug:
        print(f'Normal vector = {sag_comp : 10.7f} {cor_comp : 10.7f} {tra_comp : 10.7f}.')
    # Compute some temporary values
    abs_sag_comp = np.abs(sag_comp)
    abs_cor_comp = np.abs(cor_comp)
    abs_tra_comp = np.abs(tra_comp)
    # Approximate equality between magnitudes is used for tie-breaking;
    # np.isclose returns numpy booleans, hence the bitwise &/| below.
    eq_sag_cor = np.isclose(abs_sag_comp, abs_cor_comp)
    eq_sag_tra = np.isclose(abs_sag_comp, abs_tra_comp)
    eq_cor_tra = np.isclose(abs_cor_comp, abs_tra_comp)
    # Determine the slice orientation (sag, cor, tra)
    # The branches below enumerate every magnitude ordering (including ties)
    # and mirror the original Siemens C++ decision table; do not simplify
    # without verifying the tie-breaking behavior against the reference.
    if ((eq_sag_cor & eq_sag_tra) |
            (eq_sag_cor & (abs_sag_comp < abs_tra_comp)) |
            (eq_sag_tra & (abs_sag_comp > abs_cor_comp)) |
            (eq_cor_tra & (abs_cor_comp > abs_sag_comp)) |
            ((abs_sag_comp > abs_cor_comp) & (abs_sag_comp < abs_tra_comp)) |
            ((abs_sag_comp < abs_cor_comp) & (abs_cor_comp < abs_tra_comp)) |
            ((abs_sag_comp < abs_tra_comp) & (abs_tra_comp > abs_cor_comp)) |
            ((abs_cor_comp < abs_tra_comp) & (abs_tra_comp > abs_sag_comp))):
        if debug:
            print('Mainly transverse.')
        case = 2  # Transverse
    elif ((eq_sag_cor & (abs_sag_comp > abs_tra_comp)) |
            (eq_sag_tra & (abs_sag_comp < abs_cor_comp)) |
            ((abs_sag_comp < abs_cor_comp) & (abs_cor_comp > abs_tra_comp)) |
            ((abs_sag_comp > abs_tra_comp) & (abs_sag_comp < abs_cor_comp)) |
            ((abs_sag_comp < abs_tra_comp) & (abs_tra_comp < abs_cor_comp))):
        if debug:
            print('Mainly coronal.')
        case = 1  # Coronal
    elif ((eq_cor_tra & (abs_cor_comp < abs_sag_comp)) |
            ((abs_sag_comp > abs_cor_comp) & (abs_sag_comp > abs_tra_comp)) |
            ((abs_cor_comp > abs_tra_comp) & (abs_cor_comp < abs_sag_comp)) |
            ((abs_cor_comp < abs_tra_comp) & (abs_tra_comp < abs_sag_comp))):
        if debug:
            print('Mainly sagittal.')
        case = 0  # Sagital
    else:  # Invalid slice orientation...
        raise ValueError('Error: Invalid slice orientation')
    return case
def calc_prs(gs, phi, debug):
''' Python implementation of IDEA-VB17/n4/pkg/MrServers/MrMeasSrv/SeqFW/libGSL/fGSLCalcPRS.cpp
Calculates the phase encoding and readout direction vectors
:param gs: The GS vector (= slice normal vector)
:param phi: The rotational angle around Gs
:return: gp: phase direction vector
:return: gr: read direction vector
'''
# PCS axes
SAGITTAL = 0
CORONAL = 1
TRANSVERSE = 2
# Start of function
orientation = 0 # will be one of SAGITTAL, CORONAL or TRANSVERSE (0, 1, or 2)
orientation = orientation + class_ori(gs[SAGITTAL], gs[CORONAL], gs[TRANSVERSE], debug)
gp = np.zeros((3), dtype=float)
if orientation == TRANSVERSE:
gp[0] = 0.0
gp[1] = gs[2] * np.sqrt(1. / (gs[1] * gs[1] + gs[2] * gs[2]))
gp[2] = -gs[1] * np.sqrt(1. / (gs[1] * gs[1] + gs[2] * gs[2]))
elif orientation == CORONAL:
gp[0] = gs[1] * np.sqrt(1. / (gs[0] * gs[0] + gs[1] * gs[1]))
gp[1] = -gs[0] * np.sqrt(1. / (gs[0] * gs[0] + gs[1] * gs[1]))
gp[2] = 0.0
elif orientation == SAGITTAL:
gp[0] = -gs[1] * np.sqrt(1. / (gs[0] * gs[0] + gs[1] * gs[1]))
gp[1] = gs[0] * | np.sqrt(1. / (gs[0] * gs[0] + gs[1] * gs[1])) | numpy.sqrt |
import h5py
import numpy as np
import numpy.ma as ma
import numpy.lib.recfunctions as rfn
import logging
ref_region_dtype = np.dtype([('start','i8'), ('stop','i8')])
def print_ref(grp):
    '''
    Print out all references in file (or group), name-aligned
    '''
    found = []

    def _collect(name, obj):
        if name.endswith('/ref') and isinstance(obj, h5py.Dataset):
            found.append((name, obj))

    grp.visititems(_collect)
    if not found:
        return
    width = max(len(name) for name, _ in found)
    for name, obj in found:
        print(name.ljust(width) + ' ' + str(obj))
def print_data(grp):
    '''
    Print out all datasets in file (or group), name-aligned
    '''
    found = []

    def _collect(name, obj):
        if name.endswith('/data') and isinstance(obj, h5py.Dataset):
            found.append((name, obj))

    grp.visititems(_collect)
    if not found:
        return
    width = max(len(name) for name, _ in found)
    for name, obj in found:
        print(name.ljust(width) + ' ' + str(obj))
def print_attr(grp):
    '''
    Print out all attributes in file (or group), key-aligned per node
    '''
    found = []

    def _collect(name, obj):
        # Skip reference bookkeeping nodes; keep anything with attributes.
        if len(obj.attrs) and not (name.endswith('/ref') or name.endswith('/ref_region')):
            found.append((name, obj.attrs))

    grp.visititems(_collect)
    if not found:
        return
    width = max(len(key) for _, attrs in found for key in attrs)
    for name, attrs in found:
        print(name)
        for key, val in attrs.items():
            print('\t' + key + ':' + ' ' * (width - len(key)) + ' ' + str(val))
def dereference_chain(sel, refs, data=None, regions=None, mask=None, ref_directions=None, indices_only=False):
    '''
    Load a "chain" of references. Allows traversal of multiple layers of references,
    e.g. for three datasets ``A``, ``B``, and ``C`` linked ``A->B->C``. One
    can use a selection in ``A`` and load the ``C`` data associated with it.
    Example usage::
        sel = slice(0,100)
        refs = [f['A/ref/B/ref'], f['C/ref/B/ref']]
        ref_dirs = [(0,1), (1,0)]
        data = f['C/data']
        regions = [f['A/ref/B/ref_region'], f['B/ref/C/ref_region']]
        mask = np.r_[sel] > 50
        c_data = dereference_chain(sel, refs, data, regions=regions, mask=mask, ref_directions=ref_dirs)
        c_data.shape # (100, max_a2b_assoc, max_b2c_assoc)
    :param sel: iterable of indices, a slice, or an integer, see ``sel`` argument in ``dereference``
    :param refs: a list of reference datasets to load, in order, see ``ref`` argument in ``dereference``
    :param data: a dataset to load dereferenced data from, optional if ``indices_only=True``
    :param regions: lookup table into ``refs`` for each selection, see ``region`` argument in ``dereference``
    :param mask: a boolean mask into the first selection, true will not load the entry
    :param ref_directions: intepretation of reference datasets, see ``ref_direction`` argument in ``dereference``
    :param indices_only: flag to skip loading the data and instead just return indices into the final dataset
    '''
    sel = np.r_[sel]
    # Normalize the mask to one boolean per selected index.
    mask = np.zeros_like(sel, dtype=bool) | (mask if mask is not None else False)
    sel = ma.array(sel, mask=mask, shrink=False)
    shape = (len(sel),)
    dref = None
    nsteps = len(refs)
    for i in range(nsteps):
        # Only fetch actual data on the final hop; intermediate hops resolve
        # indices only.
        dset = data if i == nsteps-1 else None
        ref = refs[i]
        ref_dir = ref_directions[i] if ref_directions else (0,1) # default to (0,1)
        reg = regions[i] if regions else None
        dref = dereference(sel.data.ravel(), ref,
                           data=dset, region=reg,
                           mask=mask.ravel(), ref_direction=ref_dir,
                           indices_only=True if i != nsteps-1 else indices_only)
        # Each hop appends one association axis to the output shape.
        shape += dref.shape[-1:]
        # Propagate masking: an entry is masked if its parent was masked or
        # the dereference returned a masked (missing) association. Structured
        # dtypes ('V') need their per-field masks collapsed first.
        mask = np.expand_dims(mask, axis=-1) | \
               (rfn.structured_to_unstructured(dref.mask).any(axis=-1).reshape(shape) \
                if dref.mask.dtype.kind == 'V' else dref.mask.reshape(shape))
        dref = ma.array(dref.data.reshape(shape), mask=mask, shrink=False)
        if i != nsteps-1:
            # The resolved indices become the selection for the next hop.
            sel = dref
    return dref
def dereference(sel, ref, data=None, region=None, mask=None, ref_direction=(0,1), indices_only=False, as_masked=True):
'''
Load ``data`` referred to by ``ref`` that corresponds to the desired
positions specified in ``sel``.
:param sel: iterable of indices, an index, or a ``slice`` to match against ``ref[:,ref_direction[0]]``. Return value will have same first dimension as ``sel``, e.g. ``dereference(slice(100), ref, data).shape[0] == 100``
:param ref: a shape (N,2) ``h5py.Dataset`` or array of pairs of indices linking ``sel`` and ``data``
:param data: a ``h5py.Dataset`` or array to load dereferenced data from, can be omitted if ``indices_only==True``
:param region: a 1D ``h5py.Dataset`` or array with a structured array type of [('start','i8'), ('stop','i8')]; 'start' defines the earliest index within the ``ref`` dataset for each value in ``sel``, and 'stop' defines the last index + 1 within the ``ref`` dataset (optional). If a ``h5py.Dataset`` is used, the ``sel`` spec will be used to load data from the dataset (i.e. ``region[sel]``), otherwise ``len(sel) == len(region)`` and a 1:1 correspondence is assumed
:param mask: mask off specific items in selection (boolean, True == don't dereference selection), len(mask) == len(np.r_[sel])
:param ref_direction: defines how to interpret second dimension of ``ref``. ``ref[:,ref_direction[0]]`` are matched against items in ``sel``, and ``ref[:,ref_direction[1]]`` are indices into the ``data`` array (``default=(0,1)``). So for a simple example: ``dereference([0,1,2], [[1,0], [2,1]], ['A','B','C','D'], ref_direction=(0,1))`` returns an array equivalent to ``[[],['A'],['B']]`` and ``dereference([0,1,2], [[1,0], [2,1]], ['A','B','C','D'], ref_direction=(1,0))`` returns an array equivalent to ``[['B'],['C'],[]]``
:param indices_only: if ``True``, only returns the indices into ``data``, does not fetch data from ``data``
:returns: ``numpy`` masked array (or if ``as_masked=False`` a ``list``) of length equivalent to ``sel``
'''
# set up selection
sel_mask = mask
sel_idcs = np.r_[sel][~sel_mask] if sel_mask is not None else np.r_[sel]
n_elem = len(sel_idcs) if sel_mask is None else len(sel_mask)
return_dtype = data.dtype if not indices_only else ref.dtype
if not len(sel_idcs) and n_elem:
# special case for if there is nothing selected in the mask
if as_masked:
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True, shrink=False)
else:
return [np.empty(0, data.dtype) for _ in range(n_elem)]
elif not len(sel_idcs):
if as_masked:
return ma.array(np.empty((0,1), dtype=return_dtype), mask=True, shrink=False)
else:
return []
# load fast region lookup
if region is not None:
if isinstance(region, h5py.Dataset):
if isinstance(sel, slice):
region = region[sel] # load parent reference region information
else:
region_offset = np.min(sel_idcs)
region_sel = slice(region_offset, int(np.max(sel_idcs)+1))
region = region[region_sel][sel_idcs - region_offset]
else:
region = region[sel_idcs]
# load relevant references
region_valid = region['start'] != region['stop'] if region is not None else None
if not region is None and np.count_nonzero(region_valid) == 0:
# special case for if there are no valid references
if as_masked:
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True)
else:
return [np.empty(0, return_dtype) for _ in range(n_elem)]
ref_offset = np.min(region[region_valid]['start']) if region is not None else 0
ref_sel = slice(ref_offset, int(np.max(region[region_valid]['stop']))) if region is not None else slice(ref_offset,len(ref))
ref = ref[ref_sel]
# if no valid references, return
if len(ref) == 0:
if as_masked:
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True)
else:
return [np.empty(0, return_dtype) for _ in range(n_elem)]
# load relevant data
dset_offset = np.min(ref[:,ref_direction[1]])
dset_sel = slice(dset_offset, int(np.max(ref[:,ref_direction[1]])+1))
dset = data[dset_sel] if not indices_only else None # load child dataset region
# create a region array, if one was not given
if region is None:
region = np.zeros(len(sel_idcs), dtype=ref_region_dtype)
region['start'] = ref_sel.start
region['stop'] = ref_sel.stop
if not as_masked:
# dump into list using subregion masks
if indices_only:
indices = [
ref[st:sp,ref_direction[1]][ (ref[st:sp,ref_direction[0]] == i) ]
for i,st,sp in zip(sel_idcs, region['start']-ref_offset, region['stop']-ref_offset)
]
return indices
else:
data = [
dset[ref[st:sp,ref_direction[1]][ (ref[st:sp,ref_direction[0]] == i) ] - dset_offset]
for i,st,sp in zip(sel_idcs, region['start']-ref_offset, region['stop']-ref_offset)
]
return data
# the rest of this is index manipulation to convert from sel -> ref -> data
# first using only the unique references and then casting it back into the
# original selection
# first get mapping from unique selection back into the selection
uniq_sel, uniq_inv = np.unique(sel_idcs, return_inverse=True)
# only use references that are relevant to the selection
ref_mask = np.isin(ref[:,ref_direction[0]], uniq_sel)
if not np.any(ref_mask):
# special case if no valid references for selection
return ma.array(np.empty((n_elem,1), dtype=return_dtype), mask=True, shrink=False)
# get the number of references per parent and rearrange so that references are in ordered by parent
uniq, counts = np.unique(ref[ref_mask,ref_direction[0]], return_counts=True)
reordering = np.argsort(uniq)
uniq, counts = uniq[reordering], counts[reordering]
max_counts = np.max(counts)
# now, we'll fill a subarray consisting of unique elements that were requested (shape: (len(uniq_sel), max_counts) )
# get a mapping from the unique selection into the unique reference parents
_,uniq_sel_idcs,uniq2uniq_sel_idcs = np.intersect1d(uniq_sel, uniq, assume_unique=False, return_indices=True)
# set up subarrays for unique selection
shape = (len(uniq_sel), max_counts)
condensed_data = np.zeros(shape, dtype=return_dtype)
condensed_mask = np.zeros(shape, dtype=bool)
# block off and prepare slots for unique selection
condensed_mask[uniq_sel_idcs] = np.arange(condensed_data.shape[1]).reshape(1,-1) < counts[uniq2uniq_sel_idcs].reshape(-1,1)
view_dtype = np.dtype([('ref0',ref.dtype),('ref1',ref.dtype)])
sort_ref = np.argsort(ref[ref_mask].view(view_dtype), axis=0,
order=[view_dtype.names[ref_direction[0]], view_dtype.names[ref_direction[1]]]
) # arrange by parent (then by child)
# and fill slots
if indices_only:
| np.place(condensed_data, mask=condensed_mask, vals=ref[ref_mask,ref_direction[1]][sort_ref]) | numpy.place |
#!/usr/bin/env python
# file: LogisticRegression.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================
import daal4py as d4p
import numpy as np
class LogisticRegression:
'''
....Constructor to set LogisticRegression compute parameters
....'''
def __init__(
self,
nClasses,
dtype="float",
penaltyL1=0,
penaltyL2=0,
interceptFlag=True,
resultsToCompute="computeClassesLabels",
optSolverParam = {}
):
self.dtype = dtype
self.nClasses = nClasses
self.penaltyL1 = penaltyL1
self.penaltyL2 = penaltyL2
self.interceptFlag = interceptFlag
self.optSolverParam = optSolverParam
self.resultsToCompute = resultsToCompute
def train(self, train_data, train_labels):
dtype = (np.float64 if self.dtype == "double" else np.float32)
optSolver = None
#create a solver
if self.optSolverParam['solverName'] == 'sgd':
lrs = np.array([[self.optSolverParam['solverLearningRate']]], dtype=dtype)
batchSize_ = int(self.optSolverParam['solverBatchSize'])
method = self.optSolverParam["solverMethod"]
if method == "defaultDense":
batchSize_ = 1
optSolver = d4p.optimization_solver_sgd(function = None, learningRateSequence = lrs,
method = method,
accuracyThreshold = dtype(self.optSolverParam['solverAccuracyThreshold']),
nIterations = int(self.optSolverParam['solverMaxIterations']),
batchSize = batchSize_
)
if self.optSolverParam['solverName'] == 'lbfgs':
sls = np.array([[self.optSolverParam['solverStepLength']]], dtype=dtype)
optSolver = d4p.optimization_solver_lbfgs(function = None,
stepLengthSequence=sls,
accuracyThreshold = dtype(self.optSolverParam['solverAccuracyThreshold']),
nIterations = int(self.optSolverParam['solverMaxIterations']),
batchSize = int(self.optSolverParam['solverBatchSize']),
correctionPairBatchSize = int(self.optSolverParam['solverCorrectionPairBatchSize']),
L = int(self.optSolverParam['solverL'])
)
if self.optSolverParam['solverName'] == 'adagrad':
lr = | np.array([[self.optSolverParam['solverLearningRate']]], dtype=dtype) | numpy.array |
# coding: utf-8
# Copyright (c) MoGroup at UMD.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
from monty.json import MSONable
from scipy import stats
from scipy.optimize import curve_fit
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.util.coord import pbc_diff
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Specie
import csv
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__date__ = "6/6/2017"
__credit__ = "Pymatgen Development Team"
class DiffusivityAnalyzer(MSONable):
def __init__(self, structure, displacements, specie, temperature,
time_step, step_skip, time_intervals_number=1000,
spec_dict=None):
"""
Calculate MSD from pre-processed data, and implemented linear fitting to obtain diffusivity.
:param structure (Structure): initial structure
:param displacements (np.array): numpy array, shape is [n_ions, n_steps, axis]
:param specie (str): species string, can be Li or Li+, make sure structure has oxidation
state accordingly.
:param temperature (float): temperature of MD
:param time_step (float): time step in MD
:param step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
:param time_intervals_number (int): number of time intervals. Default is 1000
means there are ~1000 time intervals.
:param spec_dict (dict): spec dict of linear fitting. Default is
{'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}
lower_bound is in unit of Angstrom square
upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total
minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should larger
than minimum_msd_diff to do linear fitting.
"""
spec_dict = spec_dict if spec_dict is not None else {'lower_bound': 4.5, 'upper_bound': 0.5,
'minimum_msd_diff': 4.5}
if not {'lower_bound', 'upper_bound', 'minimum_msd_diff'} <= set(spec_dict.keys()):
raise Exception("spec_dict does not have enough parameters.")
time_step_displacements = time_step * step_skip
# prepare
indices = []
framework_indices = []
for i, site in enumerate(structure):
if site.specie.symbol == specie:
indices.append(i)
else:
framework_indices.append(i)
if len(indices) == 0:
raise Exception("There is no specie {} in the structure".format(specie))
if len(framework_indices) == 0:
dc = displacements
else:
framework_disp = displacements[framework_indices]
drift = np.average(framework_disp, axis=0)[None, :, :]
dc = displacements - drift
df = structure.lattice.get_fractional_coords(dc)
displacements_final_diffusion_ions = dc[indices]
displacements_frac_final_diffusion_ions = df[indices]
n_ions, n_steps, dim = displacements_final_diffusion_ions.shape
# time intervals, dt
dt_indices = np.arange(1, n_steps, max(int((n_steps - 1) / time_intervals_number), 1))
dt = dt_indices * time_step_displacements
# calculate msd
# define functions, algorithm from
# http://stackoverflow.com/questions/34222272/computing-mean-square-displacement-using-python-and-fft
def autocorrelation_fft(x):
N = x.shape[0]
F = np.fft.fft(x, n=2 * N)
PSD = F * F.conjugate()
res = np.fft.ifft(PSD)
res = (res[:N]).real
n = N * np.ones(N) - | np.arange(N) | numpy.arange |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
# Differential filter
def differential_filter(img, K_size=3):
H, W = img.shape
# Zero padding
pad = K_size // 2
out = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float)
out[pad: pad + H, pad: pad + W] = img.copy().astype(np.float)
tmp = out.copy()
outv = out.copy()
outh = out.copy()
# vertical kernel
Kv = [[0., -1., 0.], [0., 1., 0.], [0., 0., 0.]]
# horizontal kernel
Kh = [[0., 0., 0.], [-1., 1., 0.], [0., 0., 0.]]
## filtering
for y in range(H):
for x in range(W):
outv[pad+y, pad+x] = np.sum(Kv * tmp[y:y+K_size, x:x+K_size])
outh[pad+y, pad+x] = | np.sum(Kh * tmp[y:y+K_size, x:x+K_size]) | numpy.sum |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Quantum algorithms
==================
.. currentmodule:: thewalrus.quantum
This submodule provides access to various utility functions that act on Gaussian
quantum states.
For more details on how the hafnian relates to various properties of Gaussian quantum
states, see:
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
"A detailed study of Gaussian Boson Sampling." `arXiv:1801.07488. (2018).
<https://arxiv.org/abs/1801.07488>`_
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
"Gaussian boson sampling." `Physical review letters, 119(17), 170501. (2017).
<https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.119.170501>`_
* <NAME>.
"Franck-Condon factors by counting perfect matchings of graphs with loops."
`Journal of Chemical Physics 150, 164113 (2019). <https://aip.scitation.org/doi/10.1063/1.5086387>`_
* <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
"Simulating realistic non-Gaussian state preparation." `arXiv:1905.07011. (2019). <https://arxiv.org/abs/1905.07011>`_
Fock states
-----------
.. autosummary::
pure_state_amplitude
state_vector
density_matrix_element
density_matrix
Details
^^^^^^^
.. autofunction::
pure_state_amplitude
.. autofunction::
state_vector
.. autofunction::
density_matrix_element
.. autofunction::
density_matrix
Utility functions
-----------------
.. autosummary::
reduced_gaussian
Xmat
Sympmat
Qmat
Covmat
Amat
Beta
Means
prefactor
find_scaling_adjacency_matrix
gen_Qmat_from_graph
is_valid_cov
is_pure_cov
is_classical_cov
total_photon_num_dist_pure_state
gen_single_mode_dist
gen_multi_mode_dist
Details
^^^^^^^
"""
# pylint: disable=too-many-arguments
from itertools import count, product
import numpy as np
from scipy.optimize import root_scalar
from scipy.special import factorial as fac
from scipy.stats import nbinom
from ._hafnian import hafnian, hafnian_repeated, reduction
from ._hermite_multidimensional import hermite_multidimensional, hafnian_batched
def reduced_gaussian(mu, cov, modes):
    r""" Returns the vector of means and the covariance matrix of the specified modes.

    Args:
        mu (array): a length-:math:`2N` ``np.float64`` vector of means.
        cov (array): a :math:`2N\times 2N` ``np.float64`` covariance matrix
            representing an :math:`N` mode quantum state.
        modes (int or Sequence[int]): indices of the requested modes

    Returns:
        tuple (means, cov): where means is an array containing the vector of means,
        and cov is a square array containing the covariance matrix.

    Raises:
        ValueError: if any requested mode index is out of range.
    """
    N = len(mu) // 2
    # reduce rho down to specified subsystems
    if isinstance(modes, int):
        modes = [modes]
    # Valid mode indices are 0..N-1. The previous check used `> N`, which let
    # the out-of-range index N slip through and caused an IndexError below.
    if np.any(np.array(modes) >= N):
        raise ValueError("Provided mode is larger than the number of subsystems.")
    if len(modes) == N:
        # reduced state is full state
        return mu, cov
    # select the x- and p-components of each requested mode
    ind = np.concatenate([np.array(modes), np.array(modes) + N])
    rows = ind.reshape(-1, 1)
    cols = ind.reshape(1, -1)
    return mu[ind], cov[rows, cols]
def Xmat(N):
    r"""Returns the matrix :math:`X_n = \begin{bmatrix}0 & I_n\\ I_n & 0\end{bmatrix}`

    Args:
        N (int): positive integer

    Returns:
        array: :math:`2N\times 2N` array
    """
    # Build the anti-diagonal block structure by setting the two identity
    # blocks entry-wise instead of assembling it with np.block.
    X = np.zeros((2 * N, 2 * N))
    diag = np.arange(N)
    X[diag, diag + N] = 1.0
    X[diag + N, diag] = 1.0
    return X
def Sympmat(N):
    r"""Returns the matrix :math:`\Omega_n = \begin{bmatrix}0 & I_n\\ -I_n & 0\end{bmatrix}`

    Args:
        N (int): positive integer

    Returns:
        array: :math:`2N\times 2N` array
    """
    # Set the +I and -I blocks entry-wise rather than via np.block.
    S = np.zeros((2 * N, 2 * N))
    diag = np.arange(N)
    S[diag, diag + N] = 1.0
    S[diag + N, diag] = -1.0
    return S
def Qmat(cov, hbar=2):
    r"""Returns the :math:`Q` Husimi matrix of the Gaussian state.

    Args:
        cov (array): :math:`2N\times 2N` :math:`xp`-ordered Wigner covariance matrix
        hbar (float): the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`.

    Returns:
        array: the :math:`Q` matrix.
    """
    n = len(cov) // 2  # number of modes
    # rescale the whole covariance matrix once, then slice out the blocks
    V = 2 * cov / hbar
    xx = V[:n, :n]
    xp = V[:n, n:]
    pp = V[n:, n:]
    # Hermitian block of <a_i^\dagger a_j> and symmetric block of <a_i a_j>
    ndag = (xx + pp + 1j * (xp - xp.T) - 2 * np.identity(n)) / 4
    sq = (xx - pp + 1j * (xp + xp.T)) / 4
    # sigma_Q appearing in Q(alpha) = exp[-(alpha-beta).sigma_Q^{-1}.(alpha-beta)/2]/|sigma_Q|
    return np.block([[ndag, sq.conj()], [sq, ndag.conj()]]) + np.identity(2 * n)
def Covmat(Q, hbar=2):
r"""Returns the Wigner covariance matrix in the :math:`xp`-ordering of the Gaussian state.
This is the inverse function of Qmat.
Args:
Q (array): :math:`2N\times 2N` Husimi Q matrix
hbar (float): the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
array: the :math:`xp`-ordered covariance matrix in the xp-ordering.
"""
# number of modes
n = len(Q) // 2
I = np.identity(n)
N = Q[0:n, 0:n] - I
M = Q[n : 2 * n, 0:n]
mm11a = 2 * (N.real + M.real) + np.identity(n)
mm22a = 2 * (N.real - M.real) + | np.identity(n) | numpy.identity |
if __name__ == '__main__':
# This is a terrible hack just to be able to execute this file directly
import sys
sys.path.insert(0, '../')
from worlds.game_objects import Actions
import random, math, os, pickle
import numpy as np
"""
Auxiliary class with the configuration parameters that the Game class needs
"""
class WaterWorldParams:
    """Configuration container for the WaterWorld game."""

    def __init__(self, state_file = None, max_x = 1000, max_y = 700, b_num_colors = 6,
                 b_radius = 20, b_velocity = 30, b_num_per_color = 10,
                 use_velocities = True, ball_disappear = True):
        # World dimensions
        self.max_x, self.max_y = max_x, max_y
        # Ball configuration
        self.b_num_colors = b_num_colors
        self.b_radius = b_radius
        self.b_velocity = b_velocity
        self.b_num_per_color = b_num_per_color
        # Agent acceleration step and speed cap derive from the ball speed
        self.a_vel_delta = b_velocity
        self.a_vel_max = 3 * b_velocity
        # Misc options
        self.state_file = state_file
        self.use_velocities = use_velocities
        self.ball_disappear = ball_disappear
class WaterWorld:
    def __init__(self, params):
        """Build the world from a WaterWorldParams configuration.

        :param params: WaterWorldParams instance with world/ball settings.
        """
        self.params = params
        self.use_velocities = params.use_velocities
        # _load_map is defined elsewhere in this class; presumably it creates
        # self.agent and self.balls -- TODO confirm
        self._load_map()
        if params.state_file is not None:
            self.load_state(params.state_file)
        self.env_game_over = False
        # Setting up event detectors
        self.current_collisions_old = set()
        self._update_events()
def _get_current_collision(self):
ret = set()
for b in self.balls:
if self.agent.is_colliding(b):
ret.add(b)
return ret
def _update_events(self):
self.true_props = ""
current_collisions = self._get_current_collision()
for b in current_collisions - self.current_collisions_old:
self.true_props += b.color
self.current_collisions_old = current_collisions
    def execute_action(self, a, elapsedTime=0.1):
        """Advance the simulation by one step.

        :param a: action id, converted to a worlds.game_objects.Actions value.
        :param elapsedTime: simulated time integrated for this step.
        Side effects: updates collision bookkeeping and `true_props`,
        relocates disappearing balls, moves the agent and every ball, and
        bounces objects off the world walls.
        """
        action = Actions(a)
        # computing events
        self._update_events()
        # if balls disappear, then relocate balls that the agent is colliding before the action
        if self.params.ball_disappear:
            for b in self.balls:
                if self.agent.is_colliding(b):
                    pos, vel = self._get_pos_vel_new_ball()
                    b.update(pos, vel)
        # updating the agents velocity
        self.agent.execute_action(action)
        balls_all = [self.agent] + self.balls
        max_x, max_y = self.params.max_x, self.params.max_y
        # updating position
        for b in balls_all:
            b.update_position(elapsedTime)
        # handling collisions
        for i in range(len(balls_all)):
            b = balls_all[i]
            # walls
            if b.pos[0] - b.radius < 0 or b.pos[0] + b.radius > max_x:
                # Place ball against edge
                if b.pos[0] - b.radius < 0: b.pos[0] = b.radius
                else: b.pos[0] = max_x - b.radius
                # Reverse direction (flip x-velocity)
                b.vel = b.vel * np.array([-1.0,1.0])
            if b.pos[1] - b.radius < 0 or b.pos[1] + b.radius > max_y:
                # Place ball against edge
                if b.pos[1] - b.radius < 0: b.pos[1] = b.radius
                else: b.pos[1] = max_y - b.radius
                # Reverse direction (flip y-velocity)
                b.vel = b.vel * np.array([1.0,-1.0])
    def get_actions(self):
        """
        Returns the list with the actions that the agent can perform
        (delegates to the agent object).
        """
        return self.agent.get_actions()
    def get_state(self):
        """Return the environment state object; always None for this domain."""
        return None # we are only using "simple reward machines" for the craft domain
    def get_true_propositions(self):
        """
        Returns the string with the propositions that are True in this state
        (the ball-color characters recorded by _update_events).
        """
        return self.true_props
# The following methods return different feature representations of the map ------------
    def get_features(self):
        """Return the feature vector for the current state (HER representation)."""
        #_,features = self._get_features_Vis()
        _,features = self._get_features_HER()
        return features
    def _get_features_Vis(self):
        """Eye-sensor ("vision") feature representation.

        Casts num_eyes=16 rays from the agent, each used in both directions
        (so 32 eyes total), intersects them with the four walls and with every
        ball, and records contact points per eye via add_contact_point
        (presumably keeping the closest contact -- TODO confirm, helper not
        defined in this file). Each eye contributes 3 + num_classes values
        built by get_eye_features; the agent's own (normalized) velocity is
        appended at the end.

        Returns (colliding_points, features): the per-eye contact coordinates
        and a 1-D float array of length (3 + num_classes) * 32 + 2.
        """
        vel_max = float(self.params.a_vel_max)
        range_max = (self.params.max_x**2+self.params.max_y**2)**0.5
        max_x = self.params.max_x
        max_y = self.params.max_y
        radius = self.params.b_radius
        agent = self.agent
        a_x, a_y = agent.pos[0], agent.pos[1]
        # The state space is even larger and continuous:
        # The agent has 30 eye sensors pointing in all
        # directions and in each direction is observes
        # 5 variables: the range, the type of sensed object (green, red),
        # and the velocity of the sensed object.
        # The agent's proprioception includes two additional sensors for
        # its own speed in both x and y directions.
        # This is a total of 152-dimensional state space.
        # map from object classes to numbers
        num_eyes = 16 # in practice, each eye goes to both sides
        num_classes = self.params.b_num_colors + 1 # I'm including the walls here
        # adding walls
        contact_points = {}
        for i in range(num_eyes):
            # features per eye: range, type, v_x, v_y
            angle_pos = i * 180 / num_eyes
            angle_neg = angle_pos + 180
            # walls collisions
            col_pos = []
            col_neg = []
            if angle_pos == 0:
                col_pos.append(np.array([max_x, a_y]))
                col_neg.append(np.array([0, a_y]))
            elif angle_pos == 90:
                col_pos.append(np.array([a_x, max_y]))
                col_neg.append(np.array([a_x, 0]))
            else:
                # intersect the ray y = m*x + c with each of the four walls
                m = math.tan(math.radians(angle_pos))
                c = a_y - m * a_x
                w_n = np.array([(max_y - c)/m, max_y])
                w_e = np.array([max_x, m*max_x + c])
                w_w = np.array([0.0, c])
                w_s = np.array([-c/m, 0.0])
                if angle_pos < 90:
                    col_pos.extend([w_n, w_e])
                    col_neg.extend([w_s, w_w])
                else:
                    col_pos.extend([w_n, w_w])
                    col_neg.extend([w_s, w_e])
            # adding the points ('W' marks a wall contact)
            for p in col_pos: add_contact_point(contact_points, angle_pos, (dist(agent.pos,p),p,'W'))
            for p in col_neg: add_contact_point(contact_points, angle_neg, (dist(agent.pos,p),p,'W'))
        # Adding balls
        for b in self.balls:
            if agent.is_colliding(b):
                continue
            # computing the eyes that collide with this ball
            dd = dist(agent.pos, b.pos)
            theta = math.degrees(math.asin(b.radius/dd))
            dx, dy = b.pos[0] - a_x, b.pos[1] - a_y
            alpha = normalize_angle(math.degrees(math.atan2(dy, dx)))
            alpha_plus = alpha + theta
            alpha_minus = alpha - theta
            if alpha_minus < 0:
                alpha_minus += 360
                alpha_plus += 360
            i = math.ceil((num_eyes * alpha_minus)/180)
            angle = i * 180 / num_eyes
            while angle <= alpha_plus:
                angle_real = normalize_angle(angle)
                # checking that the ball is in the right range
                if dd-b.radius < contact_points[angle_real][0]:
                    p, q, r = b.pos[0], b.pos[1], b.radius
                    if angle_real in [90, 270]:
                        # vertical ray: intersect x = a_x with the circle
                        dis = r**2 - (a_x-p)**2
                        if dis < 0: # the line misses the ball
                            print("It missed the ball?")
                        else: # the line intersects the circle (in one or two points)
                            for case in [-1,1]:
                                x_p = a_x
                                y_p = q+case*dis**0.5
                                c_p = np.array([x_p,y_p])
                                add_contact_point(contact_points, angle_real, (dist(agent.pos,c_p),c_p,b))
                    else:
                        # general ray: solve the line/circle quadratic A*x^2 + B*x + C = 0
                        m = math.tan(math.radians(angle_real))
                        c = a_y - m * a_x
                        A = m**2+1
                        B = 2*(m*c-m*q-p)
                        C = q**2-r**2+p**2-2*c*q+c**2
                        dis = B**2-4*A*C
                        if dis < 0: # the line misses the ball
                            print("It missed the ball?", alpha, theta, alpha_minus, angle, alpha_plus)
                        else: # the line intersects the circle (in one or two points)
                            for case in [-1,1]:
                                x_p = (-B+case*dis**0.5)/(2*A)
                                y_p = m*x_p+c
                                c_p = np.array([x_p,y_p])
                                add_contact_point(contact_points, angle_real, (dist(agent.pos,c_p),c_p,b))
                i += 1
                angle = i * 180 / num_eyes
        # range, type, v_x, v_y
        n_features_per_eye = 3+num_classes
        n_features = n_features_per_eye*2*num_eyes+2
        # NOTE(review): np.float was removed in NumPy 1.24; this line raises
        # AttributeError on modern NumPy -- should be plain float/np.float64.
        features = np.zeros(n_features,dtype=np.float)
        colliding_points = []
        for i in range(2*num_eyes):
            # features per eye: range, type, v_x, v_y
            dd, p, obj = contact_points[i * 180 / num_eyes]
            colliding_points.append(p)
            features[i*n_features_per_eye:(i+1)*n_features_per_eye] = get_eye_features(dd, obj, num_classes, range_max, vel_max)
        # adding the agents velocity
        features[n_features-2:n_features] = agent.vel / vel_max
        return colliding_points, features
def _get_features_HER(self):
# Absolute position and velocity of the anget + relative positions and velocities of the other balls
# with respect to the agent
if self.use_velocities:
agent, balls = self.agent, self.balls
n_features = 4 + len(balls) * 4
features = np.zeros(n_features,dtype=np.float)
pos_max = np.array([float(self.params.max_x), float(self.params.max_y)])
vel_max = float(self.params.b_velocity + self.params.a_vel_max)
features[0:2] = agent.pos/pos_max
features[2:4] = agent.vel/float(self.params.a_vel_max)
for i in range(len(balls)):
# If the balls are colliding, I'll not include them
# (because there us nothing that the agent can do about it)
b = balls[i]
if not self.params.ball_disappear or not agent.is_colliding(b):
init = 4*(i+1)
features[init:init+2] = (b.pos - agent.pos)/pos_max
features[init+2:init+4] = (b.vel - agent.vel)/vel_max
else:
agent, balls = self.agent, self.balls
n_features = 4 + len(balls) * 2
features = np.zeros(n_features,dtype=np.float)
pos_max = np.array([float(self.params.max_x), float(self.params.max_y)])
vel_max = float(self.params.b_velocity + self.params.a_vel_max)
features[0:2] = agent.pos/pos_max
features[2:4] = agent.vel/float(self.params.a_vel_max)
for i in range(len(balls)):
# If the balls are colliding, I'll not include them
# (because there us nothing that the agent can do about it)
b = balls[i]
if not self.params.ball_disappear or not agent.is_colliding(b):
init = 2*i + 4
features[init:init+2] = (b.pos - agent.pos)/pos_max
return [], features
#return [b.pos for b in balls if not agent.is_colliding(b)], features
def _is_collising(self, pos):
for b in self.balls + [self.agent]:
if np.linalg.norm(b.pos - | np.array(pos) | numpy.array |
import numpy as np
from pandas import PeriodIndex
import pandas._testing as tm
class TestFactorize:
def test_factorize(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M"
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M"
)
exp_arr = | np.array([2, 2, 1, 0, 2, 0], dtype=np.intp) | numpy.array |
import numpy as np
a = np.arange(24)
print(a)
# [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23]
print(a.shape)
# (24,)
print(a.ndim)
# 1
a_4_6 = a.reshape([4, 6])
print(a_4_6)
# [[ 0 1 2 3 4 5]
# [ 6 7 8 9 10 11]
# [12 13 14 15 16 17]
# [18 19 20 21 22 23]]
print(a_4_6.shape)
# (4, 6)
print(a_4_6.ndim)
# 2
a_2_3_4 = a.reshape([2, 3, 4])
print(a_2_3_4)
# [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
print(a_2_3_4.shape)
# (2, 3, 4)
print(a_2_3_4.ndim)
# 3
# a_5_6 = a.reshape([5, 6])
# ValueError: cannot reshape array of size 24 into shape (5,6)
print(a.reshape(4, 6))
# [[ 0 1 2 3 4 5]
# [ 6 7 8 9 10 11]
# [12 13 14 15 16 17]
# [18 19 20 21 22 23]]
print(a.reshape(2, 3, 4))
# [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
print(np.reshape(a, [4, 6]))
# [[ 0 1 2 3 4 5]
# [ 6 7 8 9 10 11]
# [12 13 14 15 16 17]
# [18 19 20 21 22 23]]
print(np.reshape(a, [2, 3, 4]))
# [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
# print(np.reshape(a, [5, 6]))
# ValueError: cannot reshape array of size 24 into shape (5,6)
print(a.reshape(4, 6))
# [[ 0 1 2 3 4 5]
# [ 6 7 8 9 10 11]
# [12 13 14 15 16 17]
# [18 19 20 21 22 23]]
# print(np.reshape(a, 4, 6))
# ValueError: cannot reshape array of size 24 into shape (4,)
print(a.reshape([4, 6], order='C'))
# [[ 0 1 2 3 4 5]
# [ 6 7 8 9 10 11]
# [12 13 14 15 16 17]
# [18 19 20 21 22 23]]
print(a.reshape([4, 6], order='F'))
# [[ 0 4 8 12 16 20]
# [ 1 5 9 13 17 21]
# [ 2 6 10 14 18 22]
# [ 3 7 11 15 19 23]]
print(a.reshape([2, 3, 4], order='C'))
# [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
print(a.reshape([2, 3, 4], order='F'))
# [[[ 0 6 12 18]
# [ 2 8 14 20]
# [ 4 10 16 22]]
#
# [[ 1 7 13 19]
# [ 3 9 15 21]
# [ 5 11 17 23]]]
print(np.reshape(a, [4, 6], order='F'))
# [[ 0 4 8 12 16 20]
# [ 1 5 9 13 17 21]
# [ 2 6 10 14 18 22]
# [ 3 7 11 15 19 23]]
# print(a.reshape([4, 6], 'F'))
# TypeError: 'list' object cannot be interpreted as an integer
print(np.reshape(a, [4, 6], 'F'))
# [[ 0 4 8 12 16 20]
# [ 1 5 9 13 17 21]
# [ 2 6 10 14 18 22]
# [ 3 7 11 15 19 23]]
print(a.reshape([4, -1]))
# [[ 0 1 2 3 4 5]
# [ 6 7 8 9 10 11]
# [12 13 14 15 16 17]
# [18 19 20 21 22 23]]
print(a.reshape([2, -1, 4]))
# [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
# print(a.reshape([2, -1, -1]))
# ValueError: can only specify one unknown dimension
# print(a.reshape([2, -1, 5]))
# ValueError: cannot reshape array of size 24 into shape (2,newaxis,5)
a = np.arange(8)
print(a)
# [0 1 2 3 4 5 6 7]
a_2_4 = a.reshape([2, 4])
print(a_2_4)
# [[0 1 2 3]
# [4 5 6 7]]
print(np.shares_memory(a, a_2_4))
# True
a[0] = 100
print(a)
# [100 1 2 3 4 5 6 7]
print(a_2_4)
# [[100 1 2 3]
# [ 4 5 6 7]]
a_2_4[0, 0] = 0
print(a_2_4)
# [[0 1 2 3]
# [4 5 6 7]]
print(a)
# [0 1 2 3 4 5 6 7]
a_2_4_copy = a.reshape([2, 4]).copy()
print(a_2_4_copy)
# [[0 1 2 3]
# [4 5 6 7]]
print(np.shares_memory(a, a_2_4_copy))
# False
a[0] = 100
print(a)
# [100 1 2 3 4 5 6 7]
print(a_2_4_copy)
# [[0 1 2 3]
# [4 5 6 7]]
a_2_4_copy[0, 0] = 200
print(a_2_4_copy)
# [[200 1 2 3]
# [ 4 5 6 7]]
print(a)
# [100 1 2 3 4 5 6 7]
a = np.arange(6).reshape(2, 3)
print(a)
# [[0 1 2]
# [3 4 5]]
a_step = a[:, ::2]
print(a_step)
# [[0 2]
# [3 5]]
print(a_step.reshape(-1))
# [0 2 3 5]
print(np.shares_memory(a_step, a_step.reshape(-1)))
# False
| np.info(a) | numpy.info |
#!/usr/bin/env python
#########################################################################################
# Spinal Cord Registration module
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2020 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
#
# License: see the LICENSE.TXT
#########################################################################################
import logging
import os # FIXME
import shutil
from math import asin, cos, sin, acos
import numpy as np
from scipy import ndimage
from nibabel import load, Nifti1Image, save
from scipy.signal import argrelmax, medfilt
from sklearn.decomposition import PCA
from scipy.io import loadmat
import spinalcordtoolbox.image as image
from spinalcordtoolbox.math import laplacian
from spinalcordtoolbox.registration.landmarks import register_landmarks
from spinalcordtoolbox.utils import sct_progress_bar, copy_helper, run_proc, tmp_create
# TODO [AJ]
# introduce potential cleanup functions in case exceptions occur and
# filesystem is left with temp artefacts everywhere?
logger = logging.getLogger(__name__)
class Paramreg(object):
    def __init__(self, step=None, type=None, algo='syn', metric='MeanSquares', samplingStrategy='None',
                 samplingPercentage='0.2', iter='10', shrink='1', smooth='0', gradStep='0.5', deformation='1x1x0',
                 init='', filter_size=5, poly='5', slicewise='0', laplacian='0', dof='Tx_Ty_Tz_Rx_Ry_Rz',
                 smoothWarpXY='2', pca_eigenratio_th='1.6', rot_method='pca'):
        """
        Class to define registration method.

        :param step: int: Step number (starts at 1, except for type=label which corresponds to step=0).
        :param type: {im, seg, imseg, label} Type of data used for registration. Use type=label only at step=0.
        :param algo:
        :param metric:
        :param samplingStrategy: {'Regular', 'Random', 'None'}
        :param samplingPercentage: [0, 1]
        :param iter:
        :param shrink:
        :param smooth:
        :param gradStep:
        :param deformation:
        :param init:
        :param filter_size: int: Size of the Gaussian kernel when filtering the cord rotation estimate across z.
        :param poly:
        :param slicewise: {'0', '1'}: Slice-by-slice 2d transformation.
        :param laplacian:
        :param dof:
        :param smoothWarpXY:
        :param pca_eigenratio_th:
        :param rot_method: {'pca', 'hog', 'pcahog'}: Rotation method to be used with algo=centermassrot.
            pca: approximate cord segmentation by an ellipse and finds it orientation using PCA's
            eigenvectors; hog: finds the orientation using the symmetry of the image; pcahog: tries method pca and if it
            fails, uses method hog. If using hog or pcahog, type should be set to 'imseg'.
        """
        self.step = step
        self.type = type
        self.algo = algo
        self.metric = metric
        self.samplingStrategy = samplingStrategy
        self.samplingPercentage = samplingPercentage
        self.iter = iter
        self.shrink = shrink
        self.smooth = smooth
        self.laplacian = laplacian
        self.gradStep = gradStep
        self.deformation = deformation
        self.slicewise = slicewise
        self.init = init
        self.poly = poly  # only for algo=slicereg
        self.filter_size = filter_size  # only for algo=centermassrot
        self.dof = dof  # only for type=label
        self.smoothWarpXY = smoothWarpXY  # only for algo=columnwise
        self.pca_eigenratio_th = pca_eigenratio_th  # only for algo=centermassrot
        self.rot_method = rot_method  # only for algo=centermassrot
        self.rot_src = None  # this variable is used to set the angle of the cord on the src image if it is known
        self.rot_dest = None  # same as above for the destination image (e.g., if template, should be set to 0)
        # list of possible values for self.type
        self.type_list = ['im', 'seg', 'imseg', 'label']

    # update constructor with user's parameters
    def update(self, paramreg_user):
        """Update attributes from a user string such as 'step=1,type=im,algo=rigid'.

        :param paramreg_user: comma-separated list of 'key=value' items.
        :raises ValueError: if an item is too short to be a 'key=value' pair.
        """
        # NOTE: loop variable renamed (previously shadowed the builtin `object`),
        # and split uses maxsplit=1 so values containing '=' are kept intact.
        for item in paramreg_user.split(','):
            if len(item) < 2:
                raise ValueError("Invalid use of -param! Check usage (usage changed from previous version)")
            parts = item.split('=', 1)
            setattr(self, parts[0], parts[1])
class ParamregMultiStep:
    """
    Class to aggregate multiple Paramreg() classes into a dictionary. The method addStep() is used to build this class.
    """
    def __init__(self, listParam=()):
        """
        :param listParam: iterable of Paramreg instances and/or parameter strings.
            (Default changed from a mutable [] to an immutable tuple to avoid the
            shared-mutable-default pitfall; behavior is identical since it is only
            iterated, never mutated.)
        """
        self.steps = dict()
        for stepParam in listParam:
            if isinstance(stepParam, Paramreg):
                self.steps[stepParam.step] = stepParam
            else:
                self.addStep(stepParam)

    def addStep(self, stepParam):
        """
        Checks if the step is already present.
        If it exists: update it.
        If not: add it.

        :param stepParam: parameter string parsed by Paramreg.update().
        :raises ValueError: if the string has no 'step', or a non-zero step has no valid 'type'.
        """
        param_reg = Paramreg()
        param_reg.update(stepParam)
        if param_reg.step is None:
            raise ValueError("Parameters must contain 'step'!")
        if param_reg.step in self.steps:
            self.steps[param_reg.step].update(stepParam)
        else:
            self.steps[param_reg.step] = param_reg
        if int(param_reg.step) != 0 and param_reg.type not in param_reg.type_list:
            raise ValueError("Parameters must contain a type, either 'im' or 'seg'")
def register_step_ants_slice_regularized_registration(src, dest, step, metricSize, fname_mask='', verbose=1):
    """Run one slice-regularized (Tx/Ty per slice) ANTs registration step.

    Crops src/dest (and optionally the mask) to their common non-empty z-range,
    then calls the isct_antsSliceRegularizedRegistration binary.

    :param src: str: filename of the moving image.
    :param dest: str: filename of the fixed image.
    :param step: Paramreg: parameters for this registration step.
    :param metricSize: str: metric radius/number-of-bins passed to the ANTs metric.
    :param fname_mask: str: optional mask filename (passed with '-x').
    :param verbose: int: verbosity for run_proc.
    :return: (warp_forward_out, warp_inverse_out, txty_csv_out) output filenames
        written to the current working directory.
    """
    # Find the min (and max) z-slice index below which (and above which) slices only have voxels below a given
    # threshold.
    list_fname = [src, dest]
    if fname_mask:
        list_fname.append(fname_mask)
        mask_options = ['-x', fname_mask]
    else:
        mask_options = []
    zmin_global, zmax_global = 0, 99999  # this is assuming that typical image has less slice than 99999
    for fname in list_fname:
        im = image.Image(fname)
        zmin, zmax = image.find_zmin_zmax(im, threshold=0.1)
        # keep the intersection of the non-empty ranges across all inputs
        if zmin > zmin_global:
            zmin_global = zmin
        if zmax < zmax_global:
            zmax_global = zmax
    # crop images (see issue #293)
    src_crop = image.add_suffix(src, '_crop')
    image.spatial_crop(image.Image(src), dict(((2, (zmin_global, zmax_global)),))).save(src_crop)
    dest_crop = image.add_suffix(dest, '_crop')
    image.spatial_crop(image.Image(dest), dict(((2, (zmin_global, zmax_global)),))).save(dest_crop)
    # update variables
    src = src_crop
    dest = dest_crop
    scr_regStep = image.add_suffix(src, '_regStep' + str(step.step))
    # estimate transfo
    cmd = ['isct_antsSliceRegularizedRegistration',
           '-t', 'Translation[' + step.gradStep + ']',
           '-m', step.metric + '['
           + ','.join([dest, src, '1', metricSize, step.samplingStrategy, step.samplingPercentage]) + ']',
           '-p', step.poly,
           '-i', step.iter,
           '-f', step.shrink,
           '-s', step.smooth,
           '-v', '1',  # verbose (verbose=2 does not exist, so we force it to 1)
           '-o', '[step' + str(step.step) + ',' + scr_regStep + ']',  # here the warp name is stage10 because
           # antsSliceReg add "Warp"
           ] + mask_options
    # Filepaths for output files generated by isct_antsSliceRegularizedRegistration
    warp_forward_out = 'step' + str(step.step) + 'Warp.nii.gz'
    warp_inverse_out = 'step' + str(step.step) + 'InverseWarp.nii.gz'
    txty_csv_out = 'step' + str(step.step) + 'TxTy_poly.csv'
    # FIXME: Allow these filepaths be specified as input arguments (to provide control over where files are output to)
    # run command
    status, output = run_proc(cmd, verbose, is_sct_binary=True)
    return warp_forward_out, warp_inverse_out, txty_csv_out
def register_step_ants_registration(src, dest, step, masking, ants_registration_params, padding, metricSize, verbose=1):
    """Run one 3D ANTs registration step (isct_antsRegistration).

    Optionally pads the destination (so ANTs can deform the extremities) and
    applies a Laplacian filter to both images, then estimates the transform.

    :param src: str: filename of the moving image.
    :param dest: str: filename of the fixed image.
    :param step: Paramreg: parameters for this registration step.
    :param masking: list: extra CLI arguments for masking (may be empty).
    :param ants_registration_params: dict: per-algorithm extra transform parameters,
        keyed by lowercase algorithm name.
    :param padding: number of z-slices to pad the destination with.
    :param metricSize: str: metric radius/number-of-bins for the ANTs metric.
    :param verbose: int: verbosity for run_proc.
    :return: (warp_forward_out, warp_inverse_out) filenames; .mat affine for
        rigid/affine/translation, .nii.gz warp fields otherwise.
    """
    # Pad the destination image (because ants doesn't deform the extremities)
    # N.B. no need to pad if iter = 0
    if not step.iter == '0':
        dest_pad = image.add_suffix(dest, '_pad')
        run_proc(['sct_image', '-i', dest, '-o', dest_pad, '-pad', '0,0,' + str(padding)])
        dest = dest_pad
    # apply Laplacian filter
    if not step.laplacian == '0':
        logger.info(f"\nApply Laplacian filter")
        sigmas = [step.laplacian, step.laplacian, 0]
        src_img = image.Image(src)
        src_out = src_img.copy()
        src = image.add_suffix(src, '_laplacian')
        dest = image.add_suffix(dest, '_laplacian')
        # convert sigmas from mm to voxels using the image resolution
        sigmas = [sigmas[i] / src_img.dim[i + 4] for i in range(3)]
        src_out.data = laplacian(src_out.data, sigmas)
        src_out.save(path=src)
        # NOTE(review): dest was renamed to the '_laplacian' path *before* this
        # load, so Image(dest) reads a file that may not exist yet (src is
        # loaded before its rename) -- verify intended behavior.
        dest_img = image.Image(dest)
        dest_out = dest_img.copy()
        dest_out.data = laplacian(dest_out.data, sigmas)
        dest_out.save(path=dest)
    # Estimate transformation
    logger.info(f"\nEstimate transformation")
    scr_regStep = image.add_suffix(src, '_regStep' + str(step.step))
    cmd = ['isct_antsRegistration',
           '--dimensionality', '3',
           '--transform', step.algo + '[' + step.gradStep
           + ants_registration_params[step.algo.lower()] + ']',
           '--metric', step.metric + '[' + dest + ',' + src + ',1,' + metricSize + ']',
           '--convergence', step.iter,
           '--shrink-factors', step.shrink,
           '--smoothing-sigmas', step.smooth + 'mm',
           '--restrict-deformation', step.deformation,
           '--output', '[step' + str(step.step) + ',' + scr_regStep + ']',
           '--interpolation', 'BSpline[3]',
           '--verbose', '1',
           ] + masking
    # add init translation
    if step.init:
        init_dict = {'geometric': '0', 'centermass': '1', 'origin': '2'}
        cmd += ['-r', '[' + dest + ',' + src + ',' + init_dict[step.init] + ']']
    # run command
    status, output = run_proc(cmd, verbose, is_sct_binary=True)
    # get appropriate file name for transformation
    if step.algo in ['rigid', 'affine', 'translation']:
        warp_forward_out = 'step' + str(step.step) + '0GenericAffine.mat'
        warp_inverse_out = '-step' + str(step.step) + '0GenericAffine.mat'
    else:
        warp_forward_out = 'step' + str(step.step) + '0Warp.nii.gz'
        warp_inverse_out = 'step' + str(step.step) + '0InverseWarp.nii.gz'
    return warp_forward_out, warp_inverse_out
def register_step_slicewise_ants(src, dest, step, ants_registration_params, fname_mask, remove_temp_files, verbose=1):
    """Run a slicewise ANTs (SyN/BSplineSyN) registration step.

    :return: (warp_forward_out, warp_inverse_out) warping field filenames.
    """
    # if shrink!=1, force it to be 1 (otherwise, it generates a wrong 3d warping field). TODO: fix that!
    if step.shrink != '1':
        logger.warning(f"\nWhen using slicewise with SyN or BSplineSyN, shrink factor needs to be one. Forcing shrink=1")
        step.shrink = '1'
    prefix = 'step' + str(step.step)
    warp_forward_out = prefix + 'Warp.nii.gz'
    warp_inverse_out = prefix + 'InverseWarp.nii.gz'
    register_slicewise(
        fname_src=src,
        fname_dest=dest,
        paramreg=step,
        fname_mask=fname_mask,
        warp_forward_out=warp_forward_out,
        warp_inverse_out=warp_inverse_out,
        ants_registration_params=ants_registration_params,
        remove_temp_files=remove_temp_files,
        verbose=verbose,
    )
    return warp_forward_out, warp_inverse_out
def register_step_slicewise(src, dest, step, ants_registration_params, remove_temp_files, verbose=1):
    """Run a slicewise (non-ANTs) registration step without a mask.

    :return: (warp_forward_out, warp_inverse_out) warping field filenames.
    """
    # these algorithms do not support smoothing; warn and carry on
    if step.smooth != '0':
        logger.warning(f"\nAlgo {step.algo} will ignore the parameter smoothing.\n")
    prefix = 'step' + str(step.step)
    warp_forward_out = prefix + 'Warp.nii.gz'
    warp_inverse_out = prefix + 'InverseWarp.nii.gz'
    register_slicewise(
        fname_src=src,
        fname_dest=dest,
        paramreg=step,
        fname_mask='',
        warp_forward_out=warp_forward_out,
        warp_inverse_out=warp_inverse_out,
        ants_registration_params=ants_registration_params,
        remove_temp_files=remove_temp_files,
        verbose=verbose,
    )
    return warp_forward_out, warp_inverse_out
def register_step_label(src, dest, step, verbose=1):
    """Run a label-based (landmark) registration step.

    :param src: str: filename of the moving label image.
    :param dest: str: filename of the fixed label image.
    :param step: Paramreg: only `step` and `dof` are used here.
    :param verbose: int: verbosity passed to register_landmarks.
    :return: (warp_forward_out, warp_inverse_out) affine transform filenames.
    """
    # Use str(step.step) for consistency with the other register_step_* helpers:
    # plain concatenation raised TypeError whenever step.step was not a string.
    warp_forward_out = 'step' + str(step.step) + '0GenericAffine.txt'
    warp_inverse_out = '-step' + str(step.step) + '0GenericAffine.txt'
    register_landmarks(src,
                       dest,
                       step.dof,
                       fname_affine=warp_forward_out,
                       verbose=verbose)
    return warp_forward_out, warp_inverse_out
def register_slicewise(fname_src, fname_dest, paramreg=None, fname_mask='', warp_forward_out='step0Warp.nii.gz',
                       warp_inverse_out='step0InverseWarp.nii.gz', ants_registration_params=None,
                       path_qc='./', remove_temp_files=0, verbose=0):
    """
    Main function that calls various methods for slicewise registration.

    Copies the input data to a temporary folder, dispatches to the appropriate 2D slicewise
    registration method depending on paramreg.algo, then copies the resulting warping fields
    back to the calling directory.

    :param fname_src: Str or List: If List, first element is image, second element is segmentation.
    :param fname_dest: Str or List: If List, first element is image, second element is segmentation.
    :param paramreg: Class Paramreg(): registration parameters (algo, rot_method, filter_size, ...).
    :param fname_mask: Str: file name of a mask image; '' means no mask.
    :param warp_forward_out: Str: output file name of the forward warping field.
    :param warp_inverse_out: Str: output file name of the inverse warping field.
    :param ants_registration_params: Dict: extra ANTs flags; only used by ANTs-based algorithms.
    :param path_qc: Str: output folder for quality-control figures.
    :param remove_temp_files: Int: if non-zero, delete the temporary folder when done.
    :param verbose: Int: verbosity level.
    :return: None. The warping fields are copied to the calling directory.
    """
    # create temporary folder
    path_tmp = tmp_create(basename="register")
    # copy data to temp folder
    logger.info(f"\nCopy input data to temp folder...")
    if isinstance(fname_src, list):
        # list input means [image, segmentation] pairs; both are converted and saved
        # TODO: swap 0 and 1 (to be consistent with the child function below)
        src_img = image.convert(image.Image(fname_src[0]))
        src_img.save(os.path.join(path_tmp, "src.nii"), mutable=True, verbose=verbose)
        src_seg = image.convert(image.Image(fname_src[1]))
        src_seg.save(os.path.join(path_tmp, "src_seg.nii"), mutable=True, verbose=verbose)
        dest_img = image.convert(image.Image(fname_dest[0]))
        dest_img.save(os.path.join(path_tmp, "dest.nii"), mutable=True, verbose=verbose)
        dest_seg = image.convert(image.Image(fname_dest[1]))
        dest_seg.save(os.path.join(path_tmp, "dest_seg.nii"), mutable=True, verbose=verbose)
    else:
        src_img = image.convert(image.Image(fname_src))
        src_img.save(os.path.join(path_tmp, "src.nii"), mutable=True, verbose=verbose)
        dest_image = image.convert(image.Image(fname_dest))
        dest_image.save(os.path.join(path_tmp, "dest.nii"), mutable=True, verbose=verbose)
    if fname_mask != '':
        mask_img = image.convert(image.Image(fname_mask))
        mask_img.save(os.path.join(path_tmp, "mask.nii.gz"), mutable=True, verbose=verbose)
    # go to temporary folder
    curdir = os.getcwd()
    os.chdir(path_tmp)
    # Calculate displacement
    if paramreg.algo in ['centermass', 'centermassrot']:
        # translation of center of mass between source and destination in voxel space
        # NOTE: substring test — 'centermass' in 'centermass' is True, while
        # 'centermassrot' in 'centermass' is False, so this selects 'centermass' only.
        if paramreg.algo in 'centermass':
            rot_method = 'none'
        else:
            rot_method = paramreg.rot_method
        if rot_method in ['hog', 'pcahog']:
            # HOG-based methods need both the segmentation and the underlying image
            src_input = ['src_seg.nii', 'src.nii']
            dest_input = ['dest_seg.nii', 'dest.nii']
        else:
            src_input = ['src.nii']
            dest_input = ['dest.nii']
        register2d_centermassrot(
            src_input, dest_input, paramreg=paramreg, fname_warp=warp_forward_out, fname_warp_inv=warp_inverse_out,
            rot_method=rot_method, filter_size=paramreg.filter_size, path_qc=path_qc, verbose=verbose,
            pca_eigenratio_th=float(paramreg.pca_eigenratio_th), )
    elif paramreg.algo == 'columnwise':
        # scaling R-L, then column-wise center of mass alignment and scaling
        register2d_columnwise('src.nii',
                              'dest.nii',
                              fname_warp=warp_forward_out,
                              fname_warp_inv=warp_inverse_out,
                              verbose=verbose,
                              path_qc=path_qc,
                              smoothWarpXY=int(paramreg.smoothWarpXY),
                              )
    # ANTs registration
    else:
        # convert SCT flags into ANTs-compatible flags
        algo_dic = {'translation': 'Translation', 'rigid': 'Rigid', 'affine': 'Affine', 'syn': 'SyN', 'bsplinesyn': 'BSplineSyN', 'centermass': 'centermass'}
        paramreg.algo = algo_dic[paramreg.algo]
        # run slicewise registration
        register2d('src.nii',
                   'dest.nii',
                   fname_mask=fname_mask,
                   fname_warp=warp_forward_out,
                   fname_warp_inv=warp_inverse_out,
                   paramreg=paramreg,
                   ants_registration_params=ants_registration_params,
                   verbose=verbose,
                   )
    logger.info(f"\nMove warping fields...")
    copy_helper(warp_forward_out, curdir)
    copy_helper(warp_inverse_out, curdir)
    # go back
    os.chdir(curdir)
    if remove_temp_files:
        logger.info(f"rm -rf {path_tmp}")
        shutil.rmtree(path_tmp)
def register2d_centermassrot(fname_src, fname_dest, paramreg=None, fname_warp='warp_forward.nii.gz',
                             fname_warp_inv='warp_inverse.nii.gz', rot_method='pca', filter_size=0, path_qc='./',
                             verbose=0, pca_eigenratio_th=1.6, th_max_angle=40):
    """
    Rotate the source image to match the orientation of the destination image, using the first and second eigenvector
    of the PCA. This function should be used on segmentations (not images).
    This works for 2D and 3D images. If 3D, it splits the image and performs the rotation slice-by-slice.

    :param fname_src: List: Name of moving image. If rot=0 or 1, only the first element is used (should be a
        segmentation). If rot=2 or 3, the first element is a segmentation and the second is an image.
    :param fname_dest: List: Name of fixed image. If rot=0 or 1, only the first element is used (should be a
        segmentation). If rot=2 or 3, the first element is a segmentation and the second is an image.
    :param paramreg: Class Paramreg()
    :param fname_warp: name of output 3d forward warping field
    :param fname_warp_inv: name of output 3d inverse warping field
    :param rot_method: {'none', 'pca', 'hog', 'pcahog'}. Depending on the rotation method, input might be segmentation
        only or segmentation and image.
    :param filter_size: size of the gaussian filter for regularization along z for rotation angle (type: float).
        0: no regularization
    :param path_qc: output folder for quality-control figures
    :param verbose: verbosity level; 2 also saves QC figures
    :param pca_eigenratio_th: threshold for the ratio between the first and second eigenvector of the estimated ellipse
        for the PCA rotation detection method. If below this threshold, the estimation will be discarded (poorly robust)
    :param th_max_angle: threshold of the absolute value of the estimated rotation using the PCA method, above
        which the estimation will be discarded (unlikely to happen genuinely and hence considered outlier)
    :return: None. Writes fname_warp and fname_warp_inv to disk.
    """
    # TODO: no need to split the src or dest if it is the template (we know its centerline and orientation already)
    if verbose == 2:
        import matplotlib
        matplotlib.use('Agg')  # prevent display figure
        import matplotlib.pyplot as plt
    # Get image dimensions and retrieve nz
    logger.info(f"\nGet image dimensions of destination image...")
    nx, ny, nz, nt, px, py, pz, pt = image.Image(fname_dest[0]).dim
    logger.info(f"  matrix size: {str(nx)} x {str(ny)} x {str(nz)}")
    # bug fix: the z voxel size was previously logged as nz (the slice count); use pz
    logger.info(f"  voxel size: {str(px)}mm x {str(py)}mm x {str(pz)}mm")
    # Split source volume along z
    logger.info(f"\nSplit input segmentation...")
    im_src = image.Image(fname_src[0])
    split_source_list = image.split_img_data(im_src, 2)
    for im in split_source_list:
        im.save()
    # Split destination volume along z
    logger.info(f"\nSplit destination segmentation...")
    im_dest = image.Image(fname_dest[0])
    split_dest_list = image.split_img_data(im_dest, 2)
    for im in split_dest_list:
        im.save()
    data_src = im_src.data
    data_dest = im_dest.data
    # if input data is 2D, reshape into pseudo 3D (only one slice)
    if len(data_src.shape) == 2:
        new_shape = list(data_src.shape)
        new_shape.append(1)
        new_shape = tuple(new_shape)
        data_src = data_src.reshape(new_shape)
        data_dest = data_dest.reshape(new_shape)
    # Deal with cases where both an image and segmentation are input
    if len(fname_src) > 1:
        # Split source volume along z
        logger.info(f"\nSplit input image...")
        im_src_im = image.Image(fname_src[1])
        split_source_list = image.split_img_data(im_src_im, 2)
        for im in split_source_list:
            im.save()
        # Split destination volume along z
        logger.info(f"\nSplit destination image...")
        im_dest_im = image.Image(fname_dest[1])
        split_dest_list = image.split_img_data(im_dest_im, 2)
        for im in split_dest_list:
            im.save()
        data_src_im = im_src_im.data
        data_dest_im = im_dest_im.data
    # initialize displacement and rotation
    coord_src = [None] * nz
    pca_src = [None] * nz
    coord_dest = [None] * nz
    pca_dest = [None] * nz
    centermass_src = np.zeros([nz, 2])
    centermass_dest = np.zeros([nz, 2])
    # displacement_forward = np.zeros([nz, 2])
    # displacement_inverse = np.zeros([nz, 2])
    angle_src_dest = np.zeros(nz)
    z_nonzero = []
    # convert to radians
    th_max_angle *= np.pi / 180
    # Loop across slices
    for iz in sct_progress_bar(range(0, nz), unit='iter', unit_scale=False, desc="Estimate cord angle for each slice",
                               ascii=False, ncols=100):
        try:
            # compute PCA and get center or mass based on segmentation
            coord_src[iz], pca_src[iz], centermass_src[iz, :] = compute_pca(data_src[:, :, iz])
            coord_dest[iz], pca_dest[iz], centermass_dest[iz, :] = compute_pca(data_dest[:, :, iz])
            # detect rotation using the HOG method
            if rot_method in ['hog', 'pcahog']:
                angle_src_hog, conf_score_src = find_angle_hog(data_src_im[:, :, iz], centermass_src[iz, :],
                                                               px, py, angle_range=th_max_angle)
                angle_dest_hog, conf_score_dest = find_angle_hog(data_dest_im[:, :, iz], centermass_dest[iz, :],
                                                                 px, py, angle_range=th_max_angle)
                # In case no maxima is found (it should never happen)
                if (angle_src_hog is None) or (angle_dest_hog is None):
                    logger.warning(f"Slice #{str(iz)} not angle found in dest or src. It will be ignored.")
                    continue
                if rot_method == 'hog':
                    angle_src = -angle_src_hog  # flip sign to be consistent with PCA output
                    angle_dest = angle_dest_hog
            # Detect rotation using the PCA or PCA-HOG method
            if rot_method in ['pca', 'pcahog']:
                eigenv_src = pca_src[iz].components_.T[0][0], pca_src[iz].components_.T[1][0]
                eigenv_dest = pca_dest[iz].components_.T[0][0], pca_dest[iz].components_.T[1][0]
                # Make sure first element is always positive (to prevent sign flipping)
                if eigenv_src[0] <= 0:
                    eigenv_src = tuple([i * (-1) for i in eigenv_src])
                if eigenv_dest[0] <= 0:
                    eigenv_dest = tuple([i * (-1) for i in eigenv_dest])
                angle_src = angle_between(eigenv_src, [1, 0])
                angle_dest = angle_between([1, 0], eigenv_dest)
                # compute ratio between axis of PCA
                pca_eigenratio_src = pca_src[iz].explained_variance_ratio_[0] / pca_src[iz].explained_variance_ratio_[1]
                pca_eigenratio_dest = pca_dest[iz].explained_variance_ratio_[0] / pca_dest[iz].explained_variance_ratio_[1]
                # angle is set to 0 if either ratio between axis is too low or outside angle range
                if pca_eigenratio_src < pca_eigenratio_th or angle_src > th_max_angle or angle_src < -th_max_angle:
                    if rot_method == 'pca':
                        angle_src = 0
                    elif rot_method == 'pcahog':
                        logger.info("Switched to method 'hog' for slice: {}".format(iz))
                        angle_src = -angle_src_hog  # flip sign to be consistent with PCA output
                if pca_eigenratio_dest < pca_eigenratio_th or angle_dest > th_max_angle or angle_dest < -th_max_angle:
                    if rot_method == 'pca':
                        angle_dest = 0
                    elif rot_method == 'pcahog':
                        logger.info("Switched to method 'hog' for slice: {}".format(iz))
                        angle_dest = angle_dest_hog
            if not rot_method == 'none':
                # bypass estimation is source or destination angle is known a priori
                if paramreg.rot_src is not None:
                    angle_src = paramreg.rot_src
                if paramreg.rot_dest is not None:
                    angle_dest = paramreg.rot_dest
            # the angle between (src, dest) is the angle between (src, origin) + angle between (origin, dest)
            angle_src_dest[iz] = angle_src + angle_dest
            # append to list of z_nonzero
            z_nonzero.append(iz)
        # if one of the slice is empty, ignore it
        except ValueError:
            logger.warning(f"Slice #{str(iz)} is empty. It will be ignored.")
    # regularize rotation
    if not filter_size == 0 and (rot_method in ['pca', 'hog', 'pcahog']):
        # Filtering the angles by gaussian filter
        angle_src_dest_regularized = ndimage.filters.gaussian_filter1d(angle_src_dest[z_nonzero], filter_size)
        if verbose == 2:
            plt.plot(180 * angle_src_dest[z_nonzero] / np.pi, 'ob')
            plt.plot(180 * angle_src_dest_regularized / np.pi, 'r', linewidth=2)
            plt.grid()
            plt.xlabel('z')
            plt.ylabel('Angle (deg)')
            plt.title("Regularized cord angle estimation (filter_size: {})".format(filter_size))
            plt.savefig(os.path.join(path_qc, 'register2d_centermassrot_regularize_rotation.png'))
            plt.close()
        # update variable
        angle_src_dest[z_nonzero] = angle_src_dest_regularized
    # forward field is defined in destination space, inverse field in source space
    warp_x = np.zeros(data_dest.shape)
    warp_y = np.zeros(data_dest.shape)
    warp_inv_x = np.zeros(data_src.shape)
    warp_inv_y = np.zeros(data_src.shape)
    # construct 3D warping matrix
    for iz in sct_progress_bar(z_nonzero, unit='iter', unit_scale=False, desc="Build 3D deformation field",
                               ascii=False, ncols=100):
        # get indices of x and y coordinates
        row, col = np.indices((nx, ny))
        # build 2xn array of coordinates in pixel space
        coord_init_pix = np.array([row.ravel(), col.ravel(), np.array(np.ones(len(row.ravel())) * iz)]).T
        # convert coordinates to physical space
        coord_init_phy = np.array(im_src.transfo_pix2phys(coord_init_pix))
        # get centermass coordinates in physical space
        centermass_src_phy = im_src.transfo_pix2phys([[centermass_src[iz, :].T[0], centermass_src[iz, :].T[1], iz]])[0]
        centermass_dest_phy = im_src.transfo_pix2phys([[centermass_dest[iz, :].T[0], centermass_dest[iz, :].T[1], iz]])[0]
        # build rotation matrix
        R = np.matrix(((cos(angle_src_dest[iz]), sin(angle_src_dest[iz])), (-sin(angle_src_dest[iz]), cos(angle_src_dest[iz]))))
        # build 3D rotation matrix
        R3d = np.eye(3)
        R3d[0:2, 0:2] = R
        # apply forward transformation (in physical space)
        coord_forward_phy = np.array(np.dot((coord_init_phy - np.transpose(centermass_dest_phy)), R3d) + np.transpose(centermass_src_phy))
        # apply inverse transformation (in physical space)
        coord_inverse_phy = np.array(np.dot((coord_init_phy - np.transpose(centermass_src_phy)), R3d.T) + np.transpose(centermass_dest_phy))
        # display rotations
        if verbose == 2 and not angle_src_dest[iz] == 0 and not rot_method == 'hog':
            # compute new coordinates
            coord_src_rot = coord_src[iz] * R
            coord_dest_rot = coord_dest[iz] * R.T
            # generate figure
            plt.figure(figsize=(9, 9))
            # plt.ion() # enables interactive mode (allows keyboard interruption)
            for isub in [221, 222, 223, 224]:
                # plt.figure
                plt.subplot(isub)
                # ax = matplotlib.pyplot.axis()
                try:
                    if isub == 221:
                        plt.scatter(coord_src[iz][:, 0], coord_src[iz][:, 1], s=5, marker='o', zorder=10, color='steelblue',
                                    alpha=0.5)
                        pcaaxis = pca_src[iz].components_.T
                        pca_eigenratio = pca_src[iz].explained_variance_ratio_
                        plt.title('src')
                    elif isub == 222:
                        plt.scatter([coord_src_rot[i, 0] for i in range(len(coord_src_rot))], [coord_src_rot[i, 1] for i in range(len(coord_src_rot))], s=5, marker='o', zorder=10, color='steelblue', alpha=0.5)
                        pcaaxis = pca_dest[iz].components_.T
                        pca_eigenratio = pca_dest[iz].explained_variance_ratio_
                        plt.title('src_rot')
                    elif isub == 223:
                        plt.scatter(coord_dest[iz][:, 0], coord_dest[iz][:, 1], s=5, marker='o', zorder=10, color='red',
                                    alpha=0.5)
                        pcaaxis = pca_dest[iz].components_.T
                        pca_eigenratio = pca_dest[iz].explained_variance_ratio_
                        plt.title('dest')
                    elif isub == 224:
                        plt.scatter([coord_dest_rot[i, 0] for i in range(len(coord_dest_rot))], [coord_dest_rot[i, 1] for i in range(len(coord_dest_rot))], s=5, marker='o', zorder=10, color='red', alpha=0.5)
                        pcaaxis = pca_src[iz].components_.T
                        pca_eigenratio = pca_src[iz].explained_variance_ratio_
                        plt.title('dest_rot')
                    plt.text(-2.5, -2, 'eigenvectors:', horizontalalignment='left', verticalalignment='bottom')
                    plt.text(-2.5, -2.8, str(pcaaxis), horizontalalignment='left', verticalalignment='bottom')
                    plt.text(-2.5, 2.5, 'eigenval_ratio:', horizontalalignment='left', verticalalignment='bottom')
                    plt.text(-2.5, 2, str(pca_eigenratio), horizontalalignment='left', verticalalignment='bottom')
                    plt.plot([0, pcaaxis[0, 0]], [0, pcaaxis[1, 0]], linewidth=2, color='red')
                    plt.plot([0, pcaaxis[0, 1]], [0, pcaaxis[1, 1]], linewidth=2, color='orange')
                    plt.axis([-3, 3, -3, 3])
                    plt.gca().set_aspect('equal', adjustable='box')
                except Exception:
                    # bug fix: previously raised a new, empty Exception and lost the original
                    # traceback; a bare raise re-raises the active exception unchanged
                    raise
            plt.savefig(os.path.join(path_qc, 'register2d_centermassrot_pca_z' + str(iz) + '.png'))
            plt.close()
        # construct 3D warping matrix
        warp_x[:, :, iz] = np.array([coord_forward_phy[i, 0] - coord_init_phy[i, 0] for i in range(nx * ny)]).reshape((nx, ny))
        warp_y[:, :, iz] = np.array([coord_forward_phy[i, 1] - coord_init_phy[i, 1] for i in range(nx * ny)]).reshape((nx, ny))
        warp_inv_x[:, :, iz] = np.array([coord_inverse_phy[i, 0] - coord_init_phy[i, 0] for i in range(nx * ny)]).reshape((nx, ny))
        warp_inv_y[:, :, iz] = np.array([coord_inverse_phy[i, 1] - coord_init_phy[i, 1] for i in range(nx * ny)]).reshape((nx, ny))
    # Generate forward warping field (defined in destination space)
    generate_warping_field(fname_dest[0], warp_x, warp_y, fname_warp, verbose)
    generate_warping_field(fname_src[0], warp_inv_x, warp_inv_y, fname_warp_inv, verbose)
def register2d_columnwise(fname_src, fname_dest, fname_warp='warp_forward.nii.gz', fname_warp_inv='warp_inverse.nii.gz', verbose=0, path_qc='./', smoothWarpXY=1):
"""
Column-wise non-linear registration of segmentations. Based on an idea from <NAME>.
- Assumes src/dest are segmentations (not necessarily binary), and already registered by center of mass
- Assumes src/dest are in RPI orientation.
- Split along Z, then for each slice:
- scale in R-L direction to match src/dest
- loop across R-L columns and register by (i) matching center of mass and (ii) scaling.
:param fname_src:
:param fname_dest:
:param fname_warp:
:param fname_warp_inv:
:param verbose:
:return:
"""
# initialization
th_nonzero = 0.5 # values below are considered zero
# for display stuff
if verbose == 2:
import matplotlib
matplotlib.use('Agg') # prevent display figure
import matplotlib.pyplot as plt
# Get image dimensions and retrieve nz
logger.info(f"\nGet image dimensions of destination image...")
nx, ny, nz, nt, px, py, pz, pt = image.Image(fname_dest).dim
logger.info(f" matrix size: {str(nx)} x {str(ny)} x {str(nz)}")
logger.info(f" voxel size: {str(px)}mm x {str(py)}mm x {str(nz)}mm")
# Split source volume along z
logger.info(f"\nSplit input volume...")
im_src = image.Image('src.nii')
split_source_list = image.split_img_data(im_src, 2)
for im in split_source_list:
im.save()
# Split destination volume along z
logger.info(f"\nSplit destination volume...")
im_dest = image.Image('dest.nii')
split_dest_list = image.split_img_data(im_dest, 2)
for im in split_dest_list:
im.save()
# open image
data_src = im_src.data
data_dest = im_dest.data
if len(data_src.shape) == 2:
# reshape 2D data into pseudo 3D (only one slice)
new_shape = list(data_src.shape)
new_shape.append(1)
new_shape = tuple(new_shape)
data_src = data_src.reshape(new_shape)
data_dest = data_dest.reshape(new_shape)
# initialize forward warping field (defined in destination space)
warp_x = np.zeros(data_dest.shape)
warp_y = np.zeros(data_dest.shape)
# initialize inverse warping field (defined in source space)
warp_inv_x = np.zeros(data_src.shape)
warp_inv_y = np.zeros(data_src.shape)
# Loop across slices
logger.info(f"\nEstimate columnwise transformation...")
for iz in range(0, nz):
logger.info(f"{str(iz)}/{str(nz)}..")
# PREPARE COORDINATES
# ============================================================
# get indices of x and y coordinates
row, col = np.indices((nx, ny))
# build 2xn array of coordinates in pixel space
# ordering of indices is as follows:
# coord_init_pix[:, 0] = 0, 0, 0, ..., 1, 1, 1..., nx, nx, nx
# coord_init_pix[:, 1] = 0, 1, 2, ..., 0, 1, 2..., 0, 1, 2
coord_init_pix = np.array([row.ravel(), col.ravel(), np.array(np.ones(len(row.ravel())) * iz)]).T
# convert coordinates to physical space
coord_init_phy = np.array(im_src.transfo_pix2phys(coord_init_pix))
# get 2d data from the selected slice
src2d = data_src[:, :, iz]
dest2d = data_dest[:, :, iz]
# julien 20161105
#<<<
# threshold at 0.5
src2d[src2d < th_nonzero] = 0
dest2d[dest2d < th_nonzero] = 0
# get non-zero coordinates, and transpose to obtain nx2 dimensions
coord_src2d = np.array( | np.where(src2d > 0) | numpy.where |
import numpy
from nonbonded.library.statistics.statistics import (
StatisticType,
bootstrap_residuals,
compute_statistics,
)
# Size of each synthetic data set generated by the tests below.
N_DATA_POINTS = 1000
# Number of bootstrap iterations used when computing the statistics.
N_ITERATIONS = 1000
def test_rmse():
    """Test that the statistics module returns the 'correct' RMSE
    to within some noise given a set of noisy estimated values."""

    # Draw a standard deviation in [1, 2) and generate zero-mean noise with it;
    # the RMSE against a measured value of zero should then recover that std.
    true_std = numpy.random.rand() + 1.0
    noisy_estimates = numpy.random.normal(0.0, true_std, N_DATA_POINTS)
    zero_uncertainties = numpy.zeros(N_DATA_POINTS)

    statistic_values, _, _ = compute_statistics(
        numpy.zeros(N_DATA_POINTS),
        numpy.array([None] * N_DATA_POINTS),
        noisy_estimates,
        zero_uncertainties,
        N_ITERATIONS,
        statistic_types=[StatisticType.RMSE],
    )

    assert numpy.isclose(statistic_values[StatisticType.RMSE], true_std, rtol=0.1)
def test_r2():
"""Test that the statistics module returns the 'correct' R2
to within some noise given a set of noisy estimated values."""
measured_values = numpy.linspace(0.0, 1.0, N_DATA_POINTS)
estimated_values = measured_values + numpy.random.rand() / 100.0
(statistic_values, _, _) = compute_statistics(
measured_values,
numpy.zeros(N_DATA_POINTS),
estimated_values,
numpy.zeros(N_DATA_POINTS),
N_ITERATIONS,
statistic_types=[StatisticType.R2],
)
assert numpy.isclose(statistic_values[StatisticType.R2], 1.0, rtol=0.05)
estimated_values = numpy.zeros(N_DATA_POINTS) + | numpy.random.rand() | numpy.random.rand |
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
    # Module imported as a library (not run as a script): set the module-level
    # defaults that getSpec() and the other extraction helpers read.
    anchor_preset = list([None,None])
    bg_pix_limits = list([-100,-70,70,100])
    bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
    bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
    offsetlimit = None
    #set Global parameters
    status = 0
    do_coi_correction = True # if not set, disable coi_correction
    # bookkeeping for temporary files created during a run
    tempnames = list()
    tempntags = list()
    # sentinel value used to mark bad/missing pixels
    cval = -1.0123456789
    interactive = True
    update_curve = True
    contour_on_img = False
    give_result = False # with this set, a call to getSpec returns all data
    give_new_result = False
    use_rectext = False
    background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
    background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
    background_interpolation = 'linear'
    trackcentroiding = True # default (= False will disable track y-centroiding)
    global trackwidth
    trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
    bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
    write_RMF = False
    background_source_mag = 18.0
    zeroth_blim_offset = 1.0
    coi_half_width = None
    slit_width = 200
    _PROFILE_BACKGROUND_ = False # start with severe sigma-clip f background, before going to smoothing
    # date stamp (YYYYMMDD) used when naming output products
    today_ = datetime.date.today()
    datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
    fileversion=1
    calmode=True
    typeNone = type(None)
    senscorr = True # do sensitivity correction
    # banner printed on import
    print(66*"=")
    print("uvotpy module uvotgetspec version=",__version__)
    print("<NAME> (c) 2009-2017, see uvotpy licence.")
    print("please use reference provided at http://github.com/PaulKuin/uvotpy")
    print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
ra, dec : float
The Sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
- **fit_second** : bool
fit the second order. Off since it sometimes causes problems when the
orders overlap completely. Useful for spectra in top part detector
- **background_lower** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **background_upper** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
you to provide a manual offset value. Try small numbers like 1, -1, 3, etc..
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
extracted using that offset.
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
- **sumimage** : str
Name summed image generated using ``sum_Extimage()``, will extract spectrum
from summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
predict the second order flux from the first. Overestimates in centre a lot.
- **skip_field_src** : bool
if True do not locate zeroth order positions. Can be used if
absence internet connection or USNO-B1 server causes problems.
- **optimal_extraction** : bool, obsolete
Do not use.Better results with other implementation.
- **catspec** : path
optional full path to the catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
(this is done prior to running uvotgraspcorr is that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
name if the lenticular filter before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
specify a magnitude limit to seach for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
User provides a background template that will be used instead
determining background. Must be in counts. Size and alignment
must exactly match detector image.
Returns
-------
None, (give_result=True) compounded data (Y0, Y1, Y2, Y3, Y4) which
are explained in the code, or (give_new_result=True) a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
The program needs to be started from the CORRECT data directory.
The attitude file [e.g., "sw<OBSID>pat.fits" ]is needed!
A link or copy of the attitude file needs to be present in the directory
or "../../auxil/" directory as well.
**Global parameters**
These parameters can be reset, e.g., during a (i)python session, before calling getSpec.
- **trackwidth** : float
width spectral extraction in units of sigma. The default is trackwidth = 2.5
The alternative default is trackwidth = 1.0 which gives better results for
weak sources, or spectra with nearby contamination. However, the flux
calibration and coincidence-loss correction give currently inconsistent
results. When using trackwidth=1.0, rescale the flux to match trackwidth=2.5
which value was used for flux calibration and coincidence-loss correction.
- **give_result** : bool
set to False since a call to getSpec with this set will return all the
intermediate results. See returns
When the extraction slit is set to be straight ``curved="straight"`` it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrared coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
# dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-subtracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
# anker2 = second order anker X,Y position ; anker_field = Xphi,Yphy input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
if (type(RA) == np.ndarray) | (type(DEC) == np.array):
raise IOError("RA, and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
if CALDB == '':
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
if HEADAS == '':
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x + " "
Y0 = None
Y2 = None
Y3 = None
Y4 = None
Yfit = {}
Yout = {"coi_level":None} # output dictionary (2014-01-01; replace Y0,Y1,Y2,Y3)
lfilt1_aspcorr = "not initialized"
lfilt2_aspcorr = "not initialized"
qflag = quality_flags()
ZOpos = None
# parameters getSpec()
Yout.update({'indir':indir,'obsid':obsid,'ext':ext})
Yout.update({'ra':RA,'dec':DEC,'wheelpos':wheelpos})
if type(sumimage) == typeNone:
if background_template is not None:
# convert background_template to a dictionary
background_template = {'template':np.asarray(background_template),
'sumimg':False}
try:
ext = int(ext)
except:
print("fatal error in extension number: must be an integer value")
# locate related lenticular images
specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile = \
fileinfo(filestub,ext,directory=indir,wheelpos=wheelpos,chatter=chatter)
# set some flags and variables
lfiltinput = (lfilt1 != None) ^ (lfilt2 != None)
lfiltpresent = lfiltinput | (lfilt1_ != None) | (lfilt2_ != None)
if (type(lfilt1_) == typeNone) & (type(lfilt2_) == typeNone):
# ensure the output is consistent with no lenticular filter solution
use_lenticular_image = False
# translate
filt_id = {"wh":"wh","v":"vv","b":"bb","u":"uu","uvw1":"w1","uvm2":"m2","uvw2":"w2"}
lfiltflag = False
if ((type(lfilt1) == typeNone)&(type(lfilt1_) != typeNone)):
lfilt1 = lfilt1_
lfilt1_ext = lfilt1_ext_
if chatter > 0: print("lenticular filter 1 from search lenticular images"+lfilt1+"+"+str(lfilt1_ext))
lfiltflag = True
lfilt1_aspcorr = None
try:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
except:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img.gz",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
if ((type(lfilt2) == typeNone)&(type(lfilt2_) != typeNone)):
lfilt2 = lfilt2_
lfilt2_ext = lfilt2_ext_
if chatter > 0: print("lenticular filter 2 from search lenticular images"+lfilt2+"+"+str(lfilt2_ext))
lfiltflag = True
lfilt2_aspcorr = None
try:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
except:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img.gz",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
# report
if chatter > 4:
msg2 += "getSpec: image parameter values\n"
msg2 += "ra, dec = (%6.1f,%6.1f)\n" % (RA,DEC)
msg2 += "filestub, extension = %s[%i]\n"% (filestub, ext)
if lfiltpresent & use_lenticular_image:
msg2 += "first/only lenticular filter = "+lfilt1+" extension first filter = "+str(lfilt1_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt1_aspcorr)
if lfilt2_ext != None:
msg2 += "second lenticular filter = "+lfilt2+" extension second filter = "+str(lfilt2_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt2_aspcorr)
if not use_lenticular_image:
msg2 += "anchor position derived without lenticular filter\n"
msg2 += "spectrum extraction preset width = "+str(spextwidth)+'\n'
#msg2 += "optimal extraction "+str(optimal_extraction)+'\n'
hdr = pyfits.getheader(specfile,int(ext))
if chatter > -1:
msg += '\nuvotgetspec version : '+__version__+'\n'
msg += ' Position RA,DEC : '+str(RA)+' '+str(DEC)+'\n'
msg += ' Start date-time : '+str(hdr['date-obs'])+'\n'
msg += ' grism file : '+specfile.split('/')[-1]+'['+str(ext)+']\n'
msg += ' attitude file : '+attfile.split('/')[-1]+'\n'
if lfiltpresent & use_lenticular_image:
if ((lfilt1 != None) & (lfilt1_ext != None)):
msg += ' lenticular file 1: '+lfilt1+'['+str(lfilt1_ext)+']\n'
msg += ' aspcorr: '+lfilt1_aspcorr+'\n'
if ((lfilt2 != None) & (lfilt2_ext != None)):
msg += ' lenticular file 2: '+lfilt2+'['+str(lfilt2_ext)+']\n'
msg += ' aspcorr: '+lfilt2_aspcorr+'\n'
if not use_lenticular_image:
msg += "anchor position derived without lenticular filter\n"
if not 'ASPCORR' in hdr: hdr['ASPCORR'] = 'UNKNOWN'
Yout.update({'hdr':hdr})
tstart = hdr['TSTART']
tstop = hdr['TSTOP']
wheelpos = hdr['WHEELPOS']
expo = hdr['EXPOSURE']
expmap = [hdr['EXPOSURE']]
Yout.update({'wheelpos':wheelpos})
if 'FRAMTIME' not in hdr:
# compute the frametime from the CCD deadtime and deadtime fraction
#deadc = hdr['deadc']
#deadtime = 600*285*1e-9 # 600ns x 285 CCD lines seconds
#framtime = deadtime/(1.0-deadc)
framtime = 0.0110329
hdr.update('framtime',framtime,comment='frame time computed from deadc ')
Yout.update({'hdr':hdr})
if chatter > 1:
print("frame time computed from deadc - added to hdr")
print("with a value of ",hdr['framtime']," ",Yout['hdr']['framtime'])
if not 'detnam' in hdr:
hdr.update('detnam',str(hdr['wheelpos']))
msg += ' exposuretime : %7.1f \n'%(expo)
maxcounts = 1.1 * expo/framtime
if chatter > 0:
msg += ' wheel position : '+str(wheelpos)+'\n'
msg += ' roll angle : %5.1f\n'% (hdr['pa_pnt'])
msg += 'coincidence loss version: 2 (2014-07-23)\n'
msg += '======================================\n'
try:
if ( (np.abs(RA - hdr['RA_OBJ']) > 0.4) ^ (np.abs(DEC - hdr['DEC_OBJ']) > 0.4) ):
sys.stderr.write("\nWARNING: It looks like the input RA,DEC and target position in header are different fields\n")
except (RuntimeError, TypeError, NameError, KeyError):
pass
msg2 += " cannot read target position from header for verification\n"
if lfiltinput:
# the lenticular filter(s) were specified on the command line.
# check that the lenticular image and grism image are close enough in time.
if type(lfilt1_ext) == typeNone:
lfilt1_ext = int(ext)
lpos = np.where( np.array([lfilt1]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile1 = filestub+lnam[0]+'_sk.img'
hdr_l1 = pyfits.getheader(lfile1,lfilt1_ext)
tstart1 = hdr_l1['TSTART']
tstop1 = hdr_l1['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile1+" matches the grism image\n")
if lfilt2 != None:
if type(lfilt2_ext) == typeNone:
lfilt2_ext = lfilt1_ext+1
lpos = np.where( np.array([lfilt2]) == lfiltnames )
if len(lpos[0] < 1): sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile2 = filestub+lnam[0]+'_sk.img'
hdr_l2 = pyfits.getheader(lfile1,lfilt1_ext)
tstart2 = hdr_l2['TSTART']
tstop2 = hdr_l2['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile2+" matches the grism image\n")
if (not lfiltpresent) | (not use_lenticular_image):
method = "grism_only"
else:
method = None
if not senscorr: msg += "WARNING: No correction for sensitivity degradation applied.\n"
# get the USNO-B1 catalog data for the field, & find the zeroth orders
if (not skip_field_src):
if chatter > 2: print("============== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
# retrieve the input angle relative to the boresight
Xphi, Yphi, date1, msg3, lenticular_anchors = findInputAngle( RA, DEC, filestub, ext,
uvotgraspcorr_on=uvotgraspcorr_on, update_pnt=update_pnt, msg="", \
wheelpos=wheelpos, lfilter=lfilt1, lfilter_ext=lfilt1_ext, \
lfilt2=lfilt2, lfilt2_ext=lfilt2_ext, method=method, \
attfile=attfile, catspec=catspec, indir=indir, chatter=chatter)
Yout.update({"Xphi":Xphi,"Yphi":Yphi})
Yout.update({'lenticular_anchors':lenticular_anchors})
# read the anchor and dispersion out of the wavecal file
anker, anker2, C_1, C_2, angle, calibdat, msg4 = getCalData(Xphi,Yphi,wheelpos, date1, \
calfile=calfile, chatter=chatter)
hdrr = pyfits.getheader(specfile,int(ext))
if (hdrr['aspcorr'] == 'UNKNOWN') & (not lfiltpresent):
msg += "WARNING: No aspect solution found. Anchor uncertainty large.\n"
msg += "first order anchor position on detector in det coordinates:\n"
msg += "anchor1=(%8.2f,%8.2f)\n" % (anker[0],anker[1])
msg += "first order dispersion polynomial (distance anchor, \n"
msg += " highest term first)\n"
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order anchor position on detector in det coordinates:\n"
msg += "anchor2=(%8.2f,%8.2f)\n" % (anker2[0],anker2[1])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
#sys.stderr.write( "first order anchor = %s\n"%(anker))
#sys.stderr.write( "second order anchor = %s\n"%(anker2))
msg += "first order dispersion = %s\n"%(str(C_1))
msg += "second order dispersion = %s\n"%(str(C_2))
if chatter > 1:
sys.stderr.write( "first order dispersion = %s\n"%(str(C_1)) )
sys.stderr.write( "second order dispersion = %s\n"%(str(C_2)) )
msg += "lenticular filter anchor positions (det)\n"
msg += msg3
# override angle
if fixed_angle != None:
msg += "WARNING: overriding calibration file angle for extracting \n\t"\
"spectrum cal: "+str(angle)+'->'+str(fixed_angle)+" \n"
angle = fixed_angle
# override anchor position in det pixel coordinates
if anchor_position[0] != None:
cal_anker = anker
anker = np.array(anchor_position)
msg += "overriding anchor position with value [%8.1f,%8.1f]\n" % (anker[0],anker[1])
anker2 = anker2 -cal_anker + anker
msg += "overriding anchor position 2nd order with value [%8.1f,%8.1f]\n"%(anker2[0],anker2[1])
anker_field = np.array([Xphi,Yphi])
theta=np.zeros(5)+angle # use the angle from first order everywhere.
C_0 = np.zeros(3) # not in calibration file. Use uvotcal/zemax to get.
C_3 = np.zeros(3)
Cmin1 = np.zeros(3)
msg += "field coordinates:\n"
msg += "FIELD=(%9.4f,%9.4f)\n" % (Xphi,Yphi)
# order distance between anchors
dist12 = np.sqrt( (anker[0]-anker2[0])**2 + (anker[1]-anker2[1])**2 )
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
Yout.update({"anker":anker,"anker2":anker2,"C_1":C_1,"C_2":C_2,"theta":angle,"dist12":dist12})
# determine x,y locations of certain wavelengths on the image
# TBD: add curvature
if wheelpos < 500:
wavpnt = np.arange(1700,6800,slit_width)
else:
wavpnt = np.arange(2500,6600,slit_width)
dispnt=pixdisFromWave(C_1,wavpnt) # pixel distance to anchor
if chatter > 0: msg2 += 'first order angle at anchor point: = %7.1f\n'%(angle)
crpix = crpix1,crpix2 = hdr['crpix1'],hdr['crpix2']
crpix = np.array(crpix) # centre of image
ankerimg = anker - np.array([1100.5,1100.5])+crpix
xpnt = ankerimg[0] + dispnt*np.cos((180-angle)*np.pi/180)
ypnt = ankerimg[1] + dispnt*np.sin((180-angle)*np.pi/180)
msg += "1st order anchor on image at (%7.1f,%7.1f)\n"%(ankerimg[0],ankerimg[1])
if chatter > 4: msg += "Found anchor point; now extracting spectrum.\n"
if chatter > 2: print("==========Found anchor point; now extracting spectrum ========")
if type(offsetlimit) == typeNone:
if wheelpos > 300:
offsetlimit = 9
sys.stdout.write("automatically set the value for the offsetlimit = "+str(offsetlimit)+'\n')
# find position zeroth order on detector from WCS-S after update from uvotwcs
#if 'hdr' not in Yout:
# hdr = pyfits.getheader(specfile,int(ext))
# Yout.update({'hdr':hdr})
zero_xy_imgpos = [-1,-1]
if chatter > 1: print("zeroth order position on image...")
try:
wS =wcs.WCS(header=hdr,key='S',relax=True,)
zero_xy_imgpos = wS.wcs_world2pix([[RA,DEC]],0)
print("position not corrected for SIP = ", zero_xy_imgpos[0][0],zero_xy_imgpos[0][1])
zero_xy_imgpos = wS.sip_pix2foc(zero_xy_imgpos, 0)[0]
if chatter > 1:
"print zeroth order position on image:",zero_xy_imgpos
except:
pass
Yout.update({'zeroxy_imgpos':zero_xy_imgpos})
# provide some checks on background inputs:
if background_lower[0] != None:
background_lower = np.abs(background_lower)
if np.sum(background_lower) >= (slit_width-10):
background_lower = [None,None]
msg += "WARNING: background_lower set too close to edge image\n Using default\n"
if background_upper[0] != None:
background_upper = np.abs(background_upper)
if np.sum(background_upper) >= (slit_width-10):
background_upper = [None,None]
msg += "WARNING: background_upper set too close to edge image\n Using default\n"
# in case of summary file:
if (not skip_field_src) & (ZOpos == None):
if chatter > 2: print("DEBUG 802 ================== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
try:
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
except:
if type(sumimage) == typeNone:
print ("exception to call find_zeroth_orders : skip_field_src = ",skip_field_src)
pass
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
if (not skip_field_src):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
pivot_ori=np.array([(ankerimg)[0],(ankerimg)[1]])
Y_ZOpos={"Xim":Xim,"Yim":Yim,"Xa":Xa,"Yb":Yb,"Thet":Thet,"b2mag":b2mag,
"matched":matched,"ondetector":ondetector}
Yout.update({"ZOpos":Y_ZOpos})
else:
Yout.update({"ZOpos":None})
# find background, extract straight slit spectrum
if chatter > 3 : print ("DEBUG 827 compute background")
if sumimage != None:
# initialize parameters for extraction summed extracted image
print('reading summed image file : '+sumimage)
print('ext label for output file is set to : ', ext)
Y6 = sum_Extimage (None, sum_file_name=sumimage, mode='read')
extimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, \
(coef0, coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr = Y6
if background_template != None:
background_template = {'extimg': background_template,
'sumimg': True}
if (background_template['extimg'].size != extimg.size):
print("ERROR")
print("background_template.size=",background_template['extimg'].size)
print("extimg.size=",extimg.size)
raise IOError("The template does not match the sumimage dimensions")
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
print("first order anchor = ",anker)
print("first order dispersion = %s"%(str(C_1)))
print("second order dispersion = %s"%(str(C_2)))
tstart = hdr['tstart']
ank_c = [100,500,0,2000]
if type(offsetlimit) == typeNone:
offset = 0
elif type(offsetlimit) == list:
offset = offsetlimit[0]-96
ank_c[0] = offsetlimit[0]
else:
offset = offsetlimit # for sumimage used offsetlimit to set the offset
ank_c[0] = 96+offsetlimit
dis = np.arange(-500,1500)
img = extimg
# get background
bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra = findBackground(extimg,
background_lower=background_lower,
background_upper=background_upper,)
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
skip_field_src = True
spnet = bg1 # placeholder
expo = exposure
maxcounts = exposure/0.01
anker2 = anker + [dist12,0]
spimg,spnetimg,anker_field = None, None, (0.,0.)
m1,m2,aa,wav1 = None,None,None,None
if type(outfile) == typeNone:
outfile='sum_image_'
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef} )
Yout.update({"anker":anker,"anker2":None,
"C_1":C_1,"C_2":C_2,
"Xphi":0.0,"Yphi":0.0,
"wheelpos":wheelpos,"dist12":dist12,
"hdr":hdr,"offset":offset})
Yout.update({"background_1":bg1,"background_2":bg2})
dropout_mask = None
Yout.update({"zeroxy_imgpos":[1000,1000]})
else:
# default extraction
if chatter > 2 : print ("DEBUG 894 default extraction")
# start with a quick straight slit extraction
exSpIm = extractSpecImg(specfile,ext,ankerimg,angle,spwid=spextwidth,
background_lower=background_lower, background_upper=background_upper,
template = background_template, x_offset = anchor_x_offset, ank_c_0offset=ank_c_0offset,
offsetlimit=offsetlimit, replace=replace, chatter=chatter, singleside_bkg=singleside_bkg)
dis = exSpIm['dis']
spnet = exSpIm['spnet']
bg = exSpIm['bg']
bg1 = exSpIm['bg1']
bg2 = exSpIm['bg2']
bgsig = exSpIm['bgsigma']
bgimg = exSpIm['bgimg']
bg_limits_used = exSpIm['bg_limits_used']
bgextra = exSpIm['bgextras']
extimg = exSpIm['extimg']
spimg = exSpIm['spimg']
spnetimg = exSpIm['spnetimg']
offset = exSpIm['offset']
ank_c = exSpIm['ank_c']
if background_template != None:
background_template ={"extimg":exSpIm["template_extimg"]}
Yout.update({"template":exSpIm["template_extimg"]})
if exSpIm['dropouts']:
dropout_mask = exSpIm['dropout_mask']
else: dropout_mask = None
Yout.update({"background_1":bg1,"background_2":bg2})
#msg += "1st order anchor offset from spectrum = %7.1f\n"%(offset)
#msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],ank_c[0])
calibdat = None # free the memory
if chatter > 2: print("============ straight slit extraction complete =================")
if np.max(spnet) < maxcounts: maxcounts = 2.0*np.max(spnet)
# initial limits spectrum (pixels)
m1 = ank_c[1]-400
if wheelpos > 500: m1 = ank_c[1]-370
if m1 < 0: m1 = 0
if m1 < (ank_c[2]+30): m1 = ank_c[2]+30
m2 = ank_c[1]+2000
if wheelpos > 500: m2 = ank_c[1]+1000
if m2 >= len(dis): m2 = len(dis)-2
if m2 > (ank_c[3]-40): m2=(ank_c[3]-40)
aa = list(range(int(m1),int(m2)))
wav1 = polyval(C_1,dis[aa])
# get grism det image
img = pyfits.getdata(specfile, ext)
if isinstance(replace,np.ndarray):
img = replace
try:
offset = np.asscalar(offset)
except:
pass
Yout.update({"offset":offset})
Zbg = bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra
net = extimg-bgextra[-1]
var = extimg.copy()
dims = np.asarray( img.shape )
dims = np.array([dims[1],dims[0]])
dims2 = np.asarray(extimg.shape)
dims2 = np.array([dims2[1],dims2[0]])
msg += "Lower background from y = %i pix\nLower background to y = %i pix\n" % (bg_limits_used[0],bg_limits_used[1])
msg += "Upper background from y = %i pix\nUpper background to y = %i pix\n" % (bg_limits_used[2],bg_limits_used[3])
msg += "TRACKWID =%4.1f\n" % (trackwidth)
# collect some results:
if sumimage == None:
Y0 = (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
(Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra
else:
Y0 = None, None, None, (dist12, None, None), expmap, bgimg, bg_limits_used, bgextra
angle = 0.0
# curvature from input (TBD how - placeholder with raw_input)
# choose input coef or pick from plot
# choose order to do it for
if (get_curve & interactive) | (get_curve & (get_curve_filename != None)):
if chatter > 3 : print ("DEBUG 978 get user-provided curve coefficients and extract spectrum")
spextwidth = None
# grab coefficients
poly_1 = None
poly_2 = None
poly_3 = None
if get_curve_filename == None:
try:
poly_1 = eval(input("give coefficients of first order polynomial array( [X^3,X^2,X,C] )"))
poly_2 = eval(input("give coefficients of second order polynomial array( [X^2,X,C] )"))
poly_3 = eval(input("give coefficients of third order polynomial array( [X,C] )"))
except:
print("failed")
if (type(poly_1) != list) | (type(poly_2) != list) | (type(poly_3) != list):
print("poly_1 type = ",type(poly_1))
print("poly_2 type = ",type(poly_2))
print("poly_3 type = ",type(poly_3))
raise IOError("the coefficients must be a list")
poly_1 = np.asarray(poly_1)
poly_2 = np.asarray(poly_2)
poly_3 = np.asarray(poly_3)
else:
try:
curfile = rdList(get_curve_filename)
poly_1 = np.array(curfile[0][0].split(','),dtype=float)
poly_2 = np.array(curfile[1][0].split(','),dtype=float)
poly_3 = np.array(curfile[2][0].split(','),dtype=float)
except:
print("There seems to be a problem when readin the coefficients out of the file")
print("The format is a list of coefficient separated by comma's, highest order first")
print("The first line for the first order")
print("The second line for the secons order")
print("The third line for the third order")
print("like, \n1.233e-10,-7.1e-7,3.01e-3,0.0.\n1.233e-5,-2.3e-2,0.03.0\n1.7e-1,0.9\n")
print(get_curve_filename)
print(curfile)
print(poly_1)
print(poly_2)
print(poly_3)
raise IOError("ERROR whilst reading curvature polynomial from file\n")
print("Curvature coefficients were read in...\npoly_1: %s \npoly_2: %s \npoly_3: %s \n"%
(poly_1,poly_2,poly_3))
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved \
= curved_extraction(
extimg, ank_c, anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
predict_second_order=predict2nd,
background_template=background_template,
angle=angle, offset=offset,
poly_1=poly_1, poly_2=poly_2, poly_3=poly_3,
msg=msg, curved=curved,
outfull=True, expmap=expmap,
fit_second=fit_second,
fit_third=fit_second,
C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
# fit_sigmas parameter needs passing
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
# update the anchor y-coordinate
if chatter > 3 : print ("DEBUG 1048 update anchor coordinate\noriginal ank_c=%s\ny1=%s"%(ank_c,y1))
ank_c[0] = y1[np.int(ank_c[1])]
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
# curvature from calibration
if spextwidth != None:
if chatter > 3 : print ("DEBUG 1067 get curve coefficients from cal file and extract spectrum ")
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown) , apercorr, expospec, msg, curved \
= curved_extraction(
extimg,ank_c,anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
background_lower=background_lower,
background_upper=background_upper, \
background_template=background_template,\
angle=angle, offset=offset,
outfull=True, expmap=expmap,
msg = msg, curved=curved,
fit_second=fit_second,
fit_third=fit_second, C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
(present0,present1,present2,present3),(q0,q1,q2,q3), \
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
(x,xstart,xend,sp_all,quality,co_back) = fitorder
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
ank_c[0] = y1[int(ank_c[1])]
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
msg += "orders present:"
if present0: msg += "0th order, "
if present1: msg += "first order"
if present2: msg += ", second order"
if present3: msg += ", third order "
print('1224 CCCCCCCCCCCCC', coef1)
print(RA,DEC)
print(anker)
print(ank_c)
msg += '\nparametrized order curvature:\n'
if present0:
for k in range(len(coef0)):
msg += "COEF0_"+str(k)+"=%12.4e\n" % (coef0[k])
if present1:
for k in range(len(coef1)):
msg += "COEF1_"+str(k)+"=%12.4e\n" % (coef1[k])
if present2:
for k in range(len(coef2)):
msg += "COEF2_"+str(k)+"=%12.4e\n" % (coef2[k])
if present3:
for k in range(len(coef3)):
msg += "COEF3_"+str(k)+"=%12.4e\n" % (coef3[k])
msg += '\nparametrized width slit:\n'
if present0:
for k in range(len(sig0coef)):
msg += "SIGCOEF0_"+str(k)+"=%12.4e\n" % (sig0coef[k])
if present1:
for k in range(len(sig1coef)):
msg += "SIGCOEF1_"+str(k)+"=%12.4e\n" % (sig1coef[k])
if present2:
for k in range(len(sig2coef)):
msg += "SIGCOEF2_"+str(k)+"=%12.4e\n" % (sig2coef[k])
if present3:
for k in range(len(sig3coef)):
msg += "SIGCOEF3_"+str(k)+"=%12.4e\n" % (sig3coef[k])
if chatter > 3 : print ("DEBUG 1142 done spectral extraction, now calibrate")
offset = ank_c[0]-slit_width/2
msg += "best fit 1st order anchor offset from spectrum = %7.1f\n"%(offset)
msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],y1[int(ank_c[1])])
msg += msg4
Yout.update({"offset":offset})
#2012-02-20 moved updateFitorder to curved_extraction
#if curved == "update":
# fit = fitorder2
#else:
# fit = fitorder
fit = fitorder
if optimal_extraction:
# development dropped, since mod8 causes slit width oscillations
# also requires a good second order flux and coi calibration for
# possible further development of order splitting.
# result in not consistent now.
print("Starting optimal extraction: This can take a few minutes ......\n\t "\
"........\n\t\t .............")
Y3 = get_initspectrum(net,var,fit,160,ankerimg,C_1=C_1,C_2=C_2,dist12=dist12,
predict2nd=predict2nd,
chatter=1)
counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
# need to test that C_2 is valid here
if predict2nd:
Y4 = predict_second_order(dis,(sp_first-bg_first), C_1,C_2, dist12, quality,dlim1L, dlim1U,wheelpos)
wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
# retrieve the effective area
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=1,arf=fluxcalfile,msg=msg,chatter=chatter)
EffArea1 = Y7[:-1]
msg = Y7[-1]
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=2,arf=None,msg=msg,chatter=chatter)
if type(Y7) == tuple:
EffArea2 = Y7[:-1]
else:
if type(Y7) != typeNone: msg = Y7
EffArea2 = None
# note that the output differs depending on parameters given, i.e., arf, anchor
Yout.update({"effarea1":EffArea1,"effarea2":EffArea2})
if interactive:
import matplotlib.pyplot as plt
if (plot_img) & (sumimage == None):
#plt.winter()
# make plot of model on image [figure 1]
#xa = np.where( (dis < 1400) & (dis > -300) )
bga = bg.copy()
fig1 = plt.figure(1); plt.clf()
img[img <=0 ] = 1e-16
plt.imshow(np.log(img),vmin=np.log(bga.mean()*0.1),vmax=np.log(bga.mean()*4))
levs = np.array([5,15,30,60,120,360]) * bg.mean()
if highlight: plt.contour(img,levels=levs)
# plot yellow wavelength marker
# TBD : add curvature
plt.plot(xpnt,ypnt,'+k',markersize=14)
if not skip_field_src:
plot_ellipsoid_regions(Xim,Yim,
Xa,Yb,Thet,b2mag,matched,ondetector,
pivot_ori,pivot_ori,dims,17.,)
if zoom:
#plt.xlim(np.max(np.array([0.,0.])),np.min(np.array([hdr['NAXIS1'],ankerimg[0]+400])))
#plt.ylim(np.max(np.array([0.,ankerimg[1]-400 ])), hdr['NAXIS2'])
plt.xlim(0,2000)
plt.ylim(0,2000)
else:
plt.xlim(0,2000)
plt.ylim(0,2000)
plt.savefig(indir+'/'+obsid+'_map.png',dpi=150)
#plt.show()
plt.close()
if (plot_raw):
#plt.winter()
nsubplots = 2
#if not fit_second: nsubplots=3
# make plot of spectrum [figure 2]
fig2 = plt.figure(2); plt.clf()
plt.subplots_adjust(top=1,hspace=0, wspace=0)
# image slice
ax21 = plt.subplot(nsubplots,1,1)
ac = -ank_c[1]
net[net<=0.] = 1e-16
#plt.imshow(np.log10(net),vmin=-0.8,vmax=0.8, #~FIXME:
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower',cmap=plt.cm.winter)
plt.imshow(np.log10(net),vmin=-10,vmax=2,
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')#,cmap=plt.cm.winter)
#plt.imshow(extimg,vmin=0,vmax=50,
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower')#,cmap=plt.cm.winter)
if highlight:
plt.contour(np.log10(net),levels=[1,1.3,1.7,2.0,3.0],
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')
#plt.imshow( extimg,vmin= (bg1.mean())*0.1,vmax= (bg1.mean()+bg1.std())*2, extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]) )
#levels = np.array([5,10,20,40,70,90.])
#levels = spnet[ank_c[2]:ank_c[3]].max() * levels * 0.01
#if highlight: plt.contour(net,levels=levels,extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]))
# cross_section_plot:
cp2 = cp2/np.max(cp2)*100
#plt.plot(ac+cp2+ank_c[1],np.arange(len(cp2)),'k',lw=2,alpha=0.6,ds='steps') #~TODO:
# plot zeroth orders
if not skip_field_src:
pivot= np.array([ank_c[1],ank_c[0]-offset])
#pivot_ori=ankerimg
mlim = 17.
if wheelpos > 500: mlim = 15.5
plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,
matched,ondetector,
pivot,pivot_ori,
dims2,mlim,
img_angle=angle-180.0,ax=ax21)
# plot line on anchor location
#plt.plot([ac+ank_c[1],ac+ank_c[1]],[0,slit_width],'k',lw=2)
plt.plot(0,ank_c[0],'kx',MarkerSize=5) #~TODO:
# plot position centre of orders
#if present0: plt.plot(ac+q0[0],y0[q0[0]],'k--',lw=1.2)
#plt.plot( ac+q1[0],y1[q1[0]],'k--',lw=1.2)
#if present2: plt.plot(ac+q2[0],y2[q2[0]],'k--',alpha=0.6,lw=1.2)
#if present3: plt.plot(ac+q3[0],y3[q3[0]],'k--',alpha=0.3,lw=1.2)
# plot borders slit region
if present0:
plt.plot(ac+q0[0],borderup [0,q0[0]],'r-')
plt.plot(ac+q0[0],borderdown[0,q0[0]],'r-')
if present1:
plt.plot(ac+q1[0],borderup [1,q1[0]],'r-',lw=1.2)
plt.plot(ac+q1[0],borderdown[1,q1[0]],'r-',lw=1.2)
if present2:
plt.plot(ac+q2[0],borderup [2,q2[0]],'r-',alpha=0.6,lw=1)
plt.plot(ac+q2[0],borderdown[2,q2[0]],'r-',alpha=0.6,lw=1)
if present3:
plt.plot(ac+q3[0],borderup [3,q3[0]],'r-',alpha=0.3,lw=1.2)
plt.plot(ac+q3[0],borderdown[3,q3[0]],'r-',alpha=0.3,lw=1.2)
# plot limits background
plt_bg = np.ones(len(q1[0]))
if (background_lower[0] == None) & (background_upper[0] == None):
background_lower = [0,50] ; background_upper = [slit_width-50,slit_width]
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
else:
if background_lower[0] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[1]),'-k',lw=1.5 )
elif background_lower[1] != None:
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
if background_upper[1] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[1]),'-k',lw=1.5 )
elif background_upper[0] != None:
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
# rescale, title
plt.ylim(0,slit_width)
#plt.ylim(50,150)
if not zoom:
xlim1 = ac+ank_c[2]
xlim2 = ac+ank_c[3]
else:
xlim1 = max(ac+ank_c[2], -420)
xlim2 = min(ac+ank_c[3],1400)
plt.xlim(xlim1,xlim2)
plt.title(obsid+'+'+str(ext))
# first order raw data plot
ax22 = plt.subplot(nsubplots,1,2)
plt.rcParams['legend.fontsize'] = 'small'
if curved == 'straight':
p1, = plt.plot( dis[ank_c[2]:ank_c[3]], spnet[ank_c[2]:ank_c[3]],'k',
ds='steps',lw=0.5,alpha=0.5,label='straight')
p2, = plt.plot( dis[ank_c[2]:ank_c[3]],
spextwidth*(bg1[ank_c[2]:ank_c[3]]+bg2[ank_c[2]:ank_c[3]])*0.5,
'b',alpha=0.5,label='background')
plt.legend([p1,p2],['straight','background'],loc=0,)
if curved != "straight":
p3, = plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'r',ds='steps',label='spectrum')
plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'k',alpha=0.2,ds='steps',label='_nolegend_')
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.5,lw=1.1,ds='steps',label='background')
# bad pixels:
qbad = np.where(quality[q1[0]] > 0)
p4, = plt.plot(x[qbad],(sp_first-bg_first)[qbad],'xk',markersize=4)
#p7, = plt.plot(x[q1[0]],(bg_first)[q1[0]],'r-',alpha=0.3,label='curve_bkg')
# annotation
#plt.legend([p3,p4,p7],['spectrum','suspect','background'],loc=0,)
plt.legend([p3,p7],['spectrum','background'],loc=0,)
maxbg = np.max(bg_first[q1[0]][np.isfinite(bg_first[q1[0]])])
topcnt = 1.2 * np.max([np.max(spnet[q1[0]]),maxbg, np.max((sp_first-bg_first)[q1[0]])])
plt.ylim(np.max([ -20, np.min((sp_first-bg_first)[q1[0]])]), np.min([topcnt, maxcounts]))
if optimal_extraction:
p5, = plt.plot(x[q1[0]],counts[1,q1[0]],'g',alpha=0.5,ds='steps',lw=1.2,label='optimal' )
p6, = plt.plot(x[q1[0]],counts[1,q1[0]],'k',alpha=0.5,ds='steps',lw=1.2,label='_nolegend_' )
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.7,lw=1.1,ds='steps',label='background')
plt.legend([p3,p5,p7],['spectrum','optimal','background'],loc=0,)
topcnt = 1.2 * np.max((sp_first-bg_first)[q1[0]])
ylim1,ylim2 = -10, np.min([topcnt, maxcounts])
plt.ylim( ylim1, ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('1st order counts')
'''
# plot second order
ax23 = plt.subplot(nsubplots,1,3)
plt.rcParams['legend.fontsize'] = 'small'
#plt.xlim(ank_c[2],ank_c[3])
if fit_second:
if curved != 'straight':
p1, = plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'r',label='spectrum')
plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'k',alpha=0.2,label='_nolegend_')
p7, = plt.plot(x[q2[0]],(bg_second)[q2[0]],'y',alpha=0.7,lw=1.1,label='background')
qbad = np.where(quality[q2[0]] > 0)
p2, = plt.plot(x[qbad],(sp_second-bg_second)[qbad],'+k',alpha=0.3,label='suspect')
plt.legend((p1,p7,p2),('spectrum','background','suspect'),loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
if optimal_extraction:
p3, = plt.plot(x[q2[0]],counts[2,q2[0]],'g',alpha=0.5,ds='steps',label='optimal' )
plt.legend((p1,p7,p2,p3),('spectrum','background','suspect','optimal',),loc=2)
#plt.ylim(np.max([ -10,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
if predict2nd :
p4, = plt.plot(dis2p+dist12,flux2p, ds='steps',label='predicted')
p5, = plt.plot(dis2p[np.where(qual2p != 0)]+dist12,flux2p[np.where(qual2p != 0)],'+k',label='suspect',markersize=4)
if optimal_extraction & fit_second:
plt.legend((p1,p2,p3,p4,p5),('curved','suspect','optimal','predicted','suspect'),loc=2)
#plt.ylim(np.max([ -100,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
elif optimal_extraction:
plt.legend((p1,p7,p4,p5),('curved','background','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
elif fit_second:
plt.legend((p1,p2,p4,p5),('curved','suspect','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
else:
plt.legend((p4,p5),('predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('2nd order counts')
'''
'''
if fit_second:
ax24 = plt.subplot(nsubplots,1,4)
plt.rcParams['legend.fontsize'] = 'small'
if (len(q3[0]) > 1) & (curved != "xxx"):
p1, = plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'r',label='spectrum')
plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'k',alpha=0.2,label='_nolegend_')
qbad = np.where(quality[q3[0]] > 0)
p2, = plt.plot(x[qbad],(sp_third-bg_third)[qbad],'xk',alpha=0.3,label='suspect')
p3, = plt.plot(x[q3[0]],bg_third[q3[0]],'y',label='background')
plt.legend([p1,p3,p2],['spectrum','background','suspect'],loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q3[0]])]),\
np.min([np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
if optimal_extraction:
p4, = plt.plot(x[q3[0]],counts[3,q3[0]],'b',alpha=0.5,ds='steps',label='optimal' )
plt.legend([p1,p3,p2,p4],['spectrum','background','suspect','optimal',],loc=2)
#plt.ylim(np.max([ -100,np.min(counts[3,q3[0]]), np.min((sp_second-bg_second)[q3[0]])]),\
# np.min([np.max(counts[3,q3[0]]), np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel(u'3rd order counts')
plt.xlabel(u'pixel distance from anchor position')
'''
plt.savefig(indir+'/'+obsid+'_count.png',dpi=150)
#plt.show()
if (plot_spec):
#plt.winter()
# NEED the flux cal applied!
nsubplots = 1
if not fit_second:
nsubplots = 1
fig3 = plt.figure(3)
plt.clf()
wav1 = polyval(C_1,x[q1[0]])
ax31 = plt.subplot(nsubplots,1,1)
if curved != "xxx":
# PSF aperture correction applies on net rate, but background
# needs to be corrected to default trackwidth linearly
rate1 = ((sp_first[q1[0]]-bg_first[q1[0]] ) * apercorr[1,[q1[0]]]
/expospec[1,[q1[0]]]).flatten()
bkgrate1 = ((bg_first)[q1[0]] * (2.5/trackwidth)
/expospec[1,[q1[0]]]).flatten()
print("computing flux for plot; frametime =",framtime)
flux1,wav1,coi_valid1 = rate2flux(wav1,rate1, wheelpos,
bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]],
#sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, effarea1=EffArea1,
spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker,
#option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
#flux1_err = 0.5*(rate2flux(,,rate+err,,) - rate2flux(,,rate-err,,))
p1, = plt.plot(wav1[np.isfinite(flux1)],flux1[np.isfinite(flux1)],
color='darkred',label=u'curved')
p11, = plt.plot(wav1[np.isfinite(flux1)&(coi_valid1==False)],
flux1[np.isfinite(flux1)&(coi_valid1==False)],'.',
color='lawngreen',
label="too bright")
# PROBLEM quality flags !!!
qbad1 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] < 16))
qbad2 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] == qflag.get("bad")))
plt.legend([p1,p11],[u'calibrated spectrum',u'too bright - not calibrated'])
if len(qbad2[0]) > 0:
p2, = plt.plot(wav1[qbad2],flux1[qbad2],
'+k',markersize=4,label=u'bad data')
plt.legend([p1,p2],[u'curved',u'bad data'])
plt.ylabel(u'1st order flux $(erg\ cm^{-2} s^{-1} \AA^{-1)}$')
# find reasonable limits flux
get_flux_limit = flux1[int(len(wav1)*0.3):int(len(wav1)*0.7)]
get_flux_limit[get_flux_limit==np.inf] = np.nan
get_flux_limit[get_flux_limit==-np.inf]= np.nan
qf = np.nanmax(get_flux_limit)
if qf > 2e-12:
qf = 2e-12
plt.ylim(0.001*qf,1.2*qf)
plt.xlim(1600,6000)
if optimal_extraction: # no longer supported (2013-04-24)
print("OPTIMAL EXTRACTION IS NO LONGER SUPPORTED")
wav1 = np.polyval(C_1,x[q1[0]])
#flux1 = rate2flux(wav1, counts[1,q1[0]]/expo, wheelpos, spectralorder=1, arf1=fluxcalfile)
flux1,wav1,coi_valid1 = rate2flux(wav1,counts[1,q1[0]]/expo, wheelpos, bkgrate=bgkrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]], #sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker, #option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
p3, = plt.plot(wav1, flux1,'g',alpha=0.5,ds='steps',lw=2,label='optimal' )
p4, = plt.plot(wav1,flux1,'k',alpha=0.5,ds='steps',lw=2,label='_nolegend_' )
#plt.legend([p1,p2,p3],['curved','suspect','optimal'],loc=0,)
plt.legend([p1,p3],['curved','optimal'],loc=0,)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
plt.ylabel(u'1st order count rate')
plt.xlim(np.min(wav1)-10,np.max(wav1))
plt.title(obsid+'+'+str(ext))
'''
if fit_second:
ax32 = plt.subplot(nsubplots,1,2)
plt.plot([1650,3200],[0,1])
plt.text(2000,0.4,'NO SECOND ORDER DATA',fontsize=16)
if curved != 'xxx':
wav2 = polyval(C_2,x[q2[0]]-dist12)
rate2 = ((sp_second[q2[0]]-bg_second[q2[0]])*
apercorr[2,[q2[0]]].flatten()/expospec[2,[q2[0]]].flatten() )
bkgrate2 = ((bg_second)[q2[0]] * (2.5/trackwidth)
/expospec[2,[q2[0]]]).flatten()
flux2,wav2,coi_valid2 = rate2flux(wav2, rate2, wheelpos,
bkgrate=bkgrate2,
co_sprate = (co_second[q2[0]]/expospec[2,[q2[0]]]).flatten(),
co_bgrate = (co_back [q2[0]]/expospec[2,[q2[0]]]).flatten(),
pixno=x[q2[0]],
arf1=fluxcalfile, arf2=None,
frametime=framtime, effarea2=EffArea2,
spectralorder=2,swifttime=tstart,
anker=anker2,
debug=False,chatter=1)
#flux1_err = rate2flux(wave,rate_err, wheelpos, spectralorder=1,)
plt.cla()
print('#############################')
print(wav2[100],flux2[100],wav2,flux2)
p1, = plt.plot(wav2,flux2,'r',label='curved')
plt.plot(wav2,flux2,'k',alpha=0.2,label='_nolegend_')
qbad1 = np.where((quality[np.array(x[q2[0]],dtype=int)] > 0) & (quality[np.array(x[q2[0]],dtype=int)] < 16))
p2, = plt.plot(wav2[qbad1],flux2[qbad1],'+k',markersize=4,label='suspect data')
plt.legend(['uncalibrated','suspect data'])
plt.ylabel(u'estimated 2nd order flux')
plt.xlim(1600,3200)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
if np.sum(qf[0]) > 0:
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
#else: plt.ylim(1e-16,2e-12)
else: plt.ylim(1e-12,1e-11)
# final fix to limits of fig 3,1
y31a,y31b = ax31.get_ylim()
setylim = False
if y31a < 1e-16:
y31a = 1e-16
setylim = True
if y31b > 1e-12:
y31b = 1e-12
setylim = True
if setylim: ax31.set_ylim(bottom=y31a,top=y31b)
#
'''
plt.xlabel(u'$\lambda(\AA)$',fontsize=16)
plt.savefig(indir+'/'+obsid+'_flux.png',dpi=150)
# to plot the three figures
#plt.show()
# output parameter
Y1 = ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 )
# output parameter
Y2 = fit, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec
Yout.update({"Yfit":Yfit})
# writing output to a file
#try:
if wr_outfile: # write output file
if ((chatter > 0) & (not clobber)): print("trying to write output files")
import uvotio
if (curved == 'straight') & (not optimal_extraction):
ank_c2 = np.copy(ank_c) ; ank_c2[1] -= m1
F = uvotio.wr_spec(RA,DEC,filestub,ext,
hdr,anker,anker_field[0],anker_field[1],
dis[aa],wav1,
spnet[aa]/expo,bg[aa]/expo,
bg1[aa]/expo,bg2[aa]/expo,
offset,ank_c2,extimg, C_1,
history=None,chatter=1,
clobber=clobber,
calibration_mode=calmode,
interactive=interactive)
elif not optimal_extraction:
if fileversion == 2:
Y = Yout
elif fileversion == 1:
Y = (Y0,Y1,Y2,Y4)
F = uvotio.writeSpectrum(RA,DEC,filestub,ext, Y,
fileoutstub=outfile,
arf1=fluxcalfile, arf2=None,
fit_second=fit_second,
write_rmffile=write_RMF, fileversion=1,
used_lenticular=use_lenticular_image,
history=msg,
calibration_mode=calmode,
chatter=chatter,
clobber=clobber )
elif optimal_extraction:
Y = (Y0,Y1,Y2,Y3,Y4)
F = uvotio.OldwriteSpectrum(RA,DEC,filestub,ext, Y, mode=2,
quality=quality, interactive=False,fileout=outfile,
updateRMF=write_rmffile, \
history=msg, chatter=5, clobber=clobber)
#except (RuntimeError, IOError, ValueError):
# print "ERROR writing output files. Try to call uvotio.wr_spec."
# pass
# clean up fake file
if tempntags.__contains__('fakefilestub'):
filestub = tempnames[tempntags.index('fakefilestub')]
os.system('rm '+indir+filestub+'ufk_??.img ')
# update Figure 3 to use the flux...
# TBD
# write the summary
sys.stdout.write(msg)
sys.stdout.write(msg2)
flog = open(logfile,'a')
flog.write(msg)
flog.write(msg2)
flog.close()
#plt.show()
if give_result: return Y0, Y1, Y2, Y3, Y4
if give_new_result: return Yout
def extractSpecImg(file,ext,anker,angle,anker0=None,anker2=None, anker3=None,\
searchwidth=35,spwid=13,offsetlimit=None, fixoffset=None,
background_lower=[None,None], background_upper=[None,None],
template=None, x_offset = False, ank_c_0offset=False, replace=None,
clobber=True,chatter=2,singleside_bkg=False):
'''
extract the grism image of spectral orders plus background
using the reference point at 2600A in first order.
Parameters
----------
file : str
input file location
ext : int
extension of image
anker : list, ndarray
X,Y coordinates of the 2600A (1) point on the image in image coordinates
angle : float
angle of the spectrum at 2600A in first order from zemax e.g., 28.8
searchwidth : float
find spectrum with this possible offset ( in crowded fields
it should be set to a smaller value)
template : dictionary
template for the background.
use_rectext : bool
If True then the HEADAS uvotimgrism program rectext is used to extract the image
This is a better way than using ndimage.rotate() which does some weird smoothing.
offsetlimit : None, float/int, list
if None, search for y-offset predicted anchor to spectrum using searchwidth
if float/int number, search for offset only up to a distance as given from y=100
if list, two elements, no more. [y-value, delta-y] for search of offset.
if delta-y < 1, fixoffset = y-value.
History
-------
2011-09-05 NPMK changed interpolation in rotate to linear, added a mask image to
make sure to keep track of the new pixel area.
2011-09-08 NPMK incorporated rectext as new extraction and removed interactive plot,
curved, and optimize which are now olsewhere.
2014-02-28 Add template for the background as an option
2014-08-04 add option to provide a 2-element list for the offsetlimit to constrain
the offset search range.
'''
import numpy as np
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import scipy.ndimage as ndimage
#out_of_img_val = -1.0123456789 now a global
Tmpl = (template != None)
if Tmpl:
if template['sumimg']:
raise IOError("extractSpecImg should not be called when there is sumimage input")
if chatter > 4:
print('extractSpecImg parameters: file, ext, anker, angle')
print(file,ext)
print(anker,angle)
print('searchwidth,chatter,spwid,offsetlimit, :')
print(searchwidth,chatter,spwid,offsetlimit)
img, hdr = pyfits.getdata(file,ext,header=True)
if isinstance(replace,np.ndarray):
img = replace
# wcs_ = wcs.WCS(header=hdr,) # detector coordinates DETX,DETY in mm
# wcsS = wcs.WCS(header=hdr,key='S',relax=True,) # TAN-SIP coordinate type
if Tmpl:
if (img.shape != template['template'].shape) :
print("ERROR")
print("img.shape=", img.shape)
print("background_template.shape=",template['template'].shape)
raise IOError("The templare array does not match the image")
wheelpos = hdr['WHEELPOS']
if chatter > 4: print('wheelpos:', wheelpos)
if not use_rectext:
# now we want to extend the image array and place the anchor at the centre
s1 = 0.5*img.shape[0]
s2 = 0.5*img.shape[1]
d1 = -(s1 - anker[1]) # distance of anker to centre img
d2 = -(s2 - anker[0])
n1 = 2.*abs(d1) + img.shape[0] + 400 # extend img with 2.x the distance of anchor
n2 = 2.*abs(d2) + img.shape[1] + 400
#return img, hdr, s1, s2, d1, d2, n1, n2
if 2*int(n1/2) == | int(n1) | numpy.int |
import numpy as np
from lsst import geom
import tqdm
from ..matching import do_balrogesque_matching
def _make_balrogesque_cat(n, seed):
rng = np.random.RandomState(seed=seed)
data = np.zeros(n, dtype=[("ra", "f8"), ("dec", "f8"), ("flux", "f8")])
data["ra"] = rng.uniform(size=n) * 1/60
data["dec"] = np.arcsin(rng.uniform(size=n, low=-1/60, high=1/60)) / np.pi * 180.0
data["flux"] = rng.uniform(size=n, low=1, high=10)
return data
def test_do_balrogesque_matching():
fsi_det_cat = _make_balrogesque_cat(100, 34489)
fsi_truth_cat = _make_balrogesque_cat(100000, 3448)
orig_det_cat = _make_balrogesque_cat(10000, 43)
match_flag, match_index = do_balrogesque_matching(
fsi_det_cat, orig_det_cat, fsi_truth_cat, "flux",
)
# make sure we get all types of matches in our test
assert set( | np.unique(match_flag) | numpy.unique |
# Load the scikit-learn handwritten-digits set, standardize it, and start a
# PCA by hand (covariance eigendecomposition of the training split).
import matplotlib.pyplot as plt
import sklearn.datasets as skdata
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn import metrics

numeros = skdata.load_digits()
target = numeros['target']
imagenes = numeros['images']
n_imagenes = len(target)
# Flatten each 8x8 image into a 64-element feature vector;
# data.reshape((n_imagenes, 8, 8)) recovers the images.
data = imagenes.reshape((n_imagenes, -1))
print(np.shape(data))

scaler = StandardScaler()
# 80% for train+test, 20% held out for validation; train/test split 50/50.
x_2, x_validation, y_2, y_validation = train_test_split(data, target, train_size=0.8)
x_train, x_test, y_train, y_test = train_test_split(x_2, y_2, train_size=0.5)
# Fit the scaler on the training split only, then apply it everywhere else.
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_validation = scaler.transform(x_validation)

# Manual PCA: eigendecompose the covariance of the scaled training set.
cov = np.cov(x_train.T)
valores, vectores = np.linalg.eig(cov)
valores = np.real(valores)
vectores = np.real(vectores)
# Sort eigenpairs by decreasing eigenvalue (this line was corrupted in the
# original source and has been reconstructed).
ii = np.argsort(-valores)
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_arrays
from ..utils import atleast2d_or_csc
from ..utils import array2d
from ..utils import atleast2d_or_csr
from ..utils import safe_asarray
from ..utils import warn_if_not_float
from ..utils.sparsefuncs import inplace_csr_row_normalize_l1
from ..utils.sparsefuncs import inplace_csr_row_normalize_l2
from ..utils.sparsefuncs import inplace_csr_column_scale
from ..utils.sparsefuncs import mean_variance_axis0
from ..externals import six
zip = six.moves.zip
map = six.moves.map
# Public names exported by ``from sklearn.preprocessing import *``.
# ``Scaler`` is the deprecated alias of ``StandardScaler``.
__all__ = [
    'Binarizer',
    'KernelCenterer',
    'MinMaxScaler',
    'Normalizer',
    'OneHotEncoder',
    'Scaler',
    'StandardScaler',
    'add_dummy_feature',
    'binarize',
    'normalize',
    'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis.

    Each slice along ``axis`` (each feature for ``axis=0``) is centered to
    zero mean and/or scaled to unit variance, using statistics computed
    from ``X`` itself.

    Parameters
    ----------
    X : array-like or CSR matrix.
        The data to center and scale.

    axis : int (0 by default)
        Axis along which the means and standard deviations are computed.
        If 0, independently standardize each feature, otherwise (if 1)
        standardize each sample.

    with_mean : boolean, True by default
        If True, center the data before scaling.

    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    copy : boolean, optional, default is True
        Set to False to attempt in-place scaling (only honored when the
        input is already a numpy array, or a CSR matrix with ``axis=0``).

    Notes
    -----
    Sparse matrices are never centered: that would densify them and may
    exhaust memory.  Callers must either pass ``with_mean=False`` (variance
    scaling only) or densify explicitly with ``X.toarray()``.

    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if not sparse.issparse(X):
        # Dense path: center and/or scale along the requested axis.
        X = np.asarray(X)
        warn_if_not_float(X, estimator='The scale function')
        mean_, std_ = _mean_and_std(
            X, axis, with_mean=with_mean, with_std=with_std)
        if copy:
            X = X.copy()
        # View with the chosen axis rolled to the front, so broadcasting
        # applies the per-slice statistics in place.
        view = np.rollaxis(X, axis)
        if with_mean:
            view -= mean_
        if with_std:
            view /= std_
        return X
    # Sparse path: column-wise variance scaling of a CSR matrix only.
    if with_mean:
        raise ValueError(
            "Cannot center sparse matrices: pass `with_mean=False` instead"
            " See docstring for motivation and alternatives.")
    if axis != 0:
        raise ValueError("Can only scale sparse matrix on axis=0, "
                         " got axis=%d" % axis)
    warn_if_not_float(X, estimator='The scale function')
    if not sparse.isspmatrix_csr(X):
        X = X.tocsr()
        copy = False  # tocsr() already produced a fresh matrix
    if copy:
        X = X.copy()
    var = mean_variance_axis0(X)[1]
    var[var == 0.0] = 1.0  # leave constant features untouched
    inplace_csr_column_scale(X, 1 / np.sqrt(var))
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Scale each feature to a given range (default ``[0, 1]``).

    For every feature the transform is the affine map::

        X_scaled = (X - data_min) / data_range * (max - min) + min

    with ``min, max = feature_range`` and the per-feature ``data_min`` and
    ``data_range`` learned from the training set in :meth:`fit`.  This is a
    common alternative to zero-mean / unit-variance standardization.

    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.

    copy : boolean, optional, default is True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).

    Attributes
    ----------
    `min_` : ndarray, shape (n_features,)
        Per feature adjustment for minimum.

    `scale_` : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
    """

    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy

    def fit(self, X, y=None):
        """Learn the per-feature minimum and range from the training data.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
        warn_if_not_float(X, estimator=self)
        f_range = self.feature_range
        if f_range[0] >= f_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(f_range))
        data_min = np.min(X, axis=0)
        data_range = np.max(X, axis=0) - data_min
        # A constant feature has zero range; pass it through unchanged
        # instead of dividing by zero.
        data_range[data_range == 0.0] = 1.0
        self.scale_ = (f_range[1] - f_range[0]) / data_range
        self.min_ = f_range[0] - data_min * self.scale_
        self.data_range = data_range
        self.data_min = data_min
        return self

    def transform(self, X):
        """Scale X into ``feature_range`` using the fitted statistics.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
        # In-place affine map (operates on the copy unless copy=False).
        X *= self.scale_
        X += self.min_
        return X

    def inverse_transform(self, X):
        """Map data from ``feature_range`` back to the original scale.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
        # Exact inverse of transform: undo the shift, then the scaling.
        X -= self.min_
        X /= self.scale_
        return X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance.

    Per-feature mean and standard deviation are estimated from the training
    set in :meth:`fit` and stored, so :meth:`transform` can apply the same
    affine map consistently to later data.

    Standardization is a common requirement for many machine learning
    estimators: objective-function terms such as the RBF kernel of Support
    Vector Machines or the L1/L2 regularizers of linear models assume all
    features are centered around 0 with comparable variance.  A feature
    whose variance is orders of magnitude larger than the others can
    dominate the objective and prevent the estimator from learning from
    the remaining features.

    Parameters
    ----------
    with_mean : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.

    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).

    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    `mean_` : array of floats with shape [n_features]
        The mean value for each feature in the training set.

    `std_` : array of floats with shape [n_features]
        The standard deviation for each feature in the training set.

    See also
    --------
    :func:`sklearn.preprocessing.scale` to perform centering and
    scaling without using the ``Transformer`` object oriented API

    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.
    """

    def __init__(self, copy=True, with_mean=True, with_std=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy

    def fit(self, X, y=None):
        """Estimate the per-feature mean and std used for later scaling.

        Parameters
        ----------
        X : array-like or CSR matrix with shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
        """
        X = check_arrays(X, copy=self.copy, sparse_format="csr")[0]
        if warn_if_not_float(X, estimator=self):
            X = X.astype(np.float)
        if not sparse.issparse(X):
            self.mean_, self.std_ = _mean_and_std(
                X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
            return self
        # Sparse input: centering is refused, only the scale is learned.
        if self.with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
        self.mean_ = None
        if self.with_std:
            var = mean_variance_axis0(X)[1]
            self.std_ = np.sqrt(var)
            self.std_[var == 0.0] = 1.0  # constant features: leave unscaled
        else:
            self.std_ = None
        return self

    def transform(self, X, y=None, copy=None):
        """Center and/or scale X with the statistics learned in ``fit``.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        copy = self.copy if copy is None else copy
        X = check_arrays(X, copy=copy, sparse_format="csr")[0]
        if warn_if_not_float(X, estimator=self):
            X = X.astype(np.float)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.std_ is not None:
                inplace_csr_column_scale(X, 1 / self.std_)
            return X
        if self.with_mean:
            X -= self.mean_
        if self.with_std:
            X /= self.std_
        return X

    def inverse_transform(self, X, copy=None):
        """Undo the standardization, mapping X back to the original scale.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        copy = self.copy if copy is None else copy
        if not sparse.issparse(X):
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Inverse order of transform: rescale first, then re-shift.
            if self.with_std:
                X *= self.std_
            if self.with_mean:
                X += self.mean_
            return X
        if self.with_mean:
            raise ValueError(
                "Cannot uncenter sparse matrices: pass `with_mean=False` "
                "instead See docstring for motivation and alternatives.")
        if not sparse.isspmatrix_csr(X):
            X = X.tocsr()
            copy = False  # tocsr() already allocated a fresh matrix
        if copy:
            X = X.copy()
        if self.std_ is not None:
            inplace_csr_column_scale(X, self.std_)
        return X
class Scaler(StandardScaler):
    """Deprecated alias of :class:`StandardScaler`, kept for backward compatibility."""
    def __init__(self, copy=True, with_mean=True, with_std=True):
        # Emit the historical deprecation notice, then delegate construction
        # to the renamed class.
        warnings.warn("Scaler was renamed to StandardScaler. The old name "
                      " will be removed in 0.15.", DeprecationWarning)
        StandardScaler.__init__(self, copy, with_mean, with_std)
def normalize(X, norm='l2', axis=1, copy=True):
    """Normalize a dataset along any axis
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.
    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).
    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Returns
    -------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The normalized data.
    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2'):
        raise ValueError("'%s' is not a supported norm" % norm)
    # Choose the sparse layout matching the normalization axis, so that the
    # in-place CSR row routines below operate on contiguous data.
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)
    X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
    warn_if_not_float(X, 'The normalize function')
    if axis == 0:
        # Feature-wise normalization: work on the transpose so the row-wise
        # code below can be reused, then transpose back at the end.
        X = X.T
    if sparse.issparse(X):
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)[:, np.newaxis]
            # Rows with zero norm are left untouched (avoid division by zero).
            norms[norms == 0.0] = 1.0
        elif norm == 'l2':
            norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis]
            norms[norms == 0.0] = 1.0
        X /= norms
    if axis == 0:
        # Undo the transpose used for feature-wise normalization.
        X = X.T
    return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm

    Every row of the data matrix with at least one non-zero component is
    rescaled on its own so that its norm (l1 or l2) becomes one.  Both dense
    numpy arrays and scipy.sparse matrices are supported (CSR input avoids
    the burden of a copy / conversion).

    Unit-norm scaling is a common step in text classification or clustering:
    the dot product of two l2-normalized TF-IDF vectors is their cosine
    similarity, the base similarity metric of the Vector Space Model used by
    the Information Retrieval community.

    Parameters
    ----------
    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm to use to normalize each non zero sample.
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """
    def __init__(self, norm='l2', copy=True):
        self.copy = copy
        self.norm = norm

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged
        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        # Input validation only: the normalizer keeps no fitted state.
        atleast2d_or_csr(X)
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm
        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        atleast2d_or_csr(X)
        # Delegate to the functional API; axis=1 normalizes row-wise.
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.
    copy : boolean, optional, default is True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).
    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    # Preserve an existing CSR/CSC layout; any other sparse input is forced
    # into CSR.
    sparse_format = "csr"
    if hasattr(X, "format"):
        if X.format in ["csr", "csc"]:
            sparse_format = X.format
    X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0]
    if sparse.issparse(X):
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        # Only stored (non-zero) entries need to be rewritten.
        above = X.data > threshold
        X.data[above] = 1
        X.data[np.logical_not(above)] = 0
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[np.logical_not(above)] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold

    Values strictly greater than the threshold map to 1; values less than or
    equal to it map to 0.  With the default threshold of 0 only positive
    values map to 1.

    Binarization is a common operation on text count data, where one may
    decide to only consider the presence or absence of a feature rather than
    a quantified number of occurrences.  It is also useful as a
    pre-processing step for estimators that consider boolean random
    variables (e.g. modelled using the Bernoulli distribution in a Bayesian
    setting).

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it by 1.
        Threshold may not be less than 0 for operations on sparse matrices.
    copy : boolean, optional, default is True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.
    This estimator is stateless (besides constructor parameters), the
    fit method does nothing but is useful when used in a pipeline.
    """
    def __init__(self, threshold=0.0, copy=True):
        self.copy = copy
        self.threshold = threshold

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged
        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        # Input validation only: the binarizer keeps no fitted state.
        atleast2d_or_csr(X)
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X
        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        # Delegate the actual thresholding to the functional API.
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix

    Let K(x, z) = phi(x)^T phi(z) for a feature map phi into a Hilbert
    space.  KernelCenterer centers the data (i.e. normalizes it to zero
    mean) without ever computing phi(x) explicitly.  It is equivalent to
    centering phi(x) with
    sklearn.preprocessing.StandardScaler(with_std=False).
    """
    def fit(self, K, y=None):
        """Fit KernelCenterer
        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.
        Returns
        -------
        self : returns an instance of self.
        """
        K = array2d(K)
        n = K.shape[0]
        # Per-column means of the training kernel and their grand mean.
        self.K_fit_rows_ = np.sum(K, axis=0) / n
        self.K_fit_all_ = self.K_fit_rows_.sum() / n
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.
        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.
        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        K = array2d(K)
        if copy:
            K = K.copy()
        # Row means of the new kernel against the training samples.
        K_pred_cols = (np.sum(K, axis=1) /
                       self.K_fit_rows_.shape[0])[:, np.newaxis]
        # Double centering: subtract row and column means, add back the
        # grand mean of the training kernel.
        K -= self.K_fit_rows_
        K -= K_pred_cols
        K += self.K_fit_all_
        return K
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.
    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        Data.
    value : float
        Value to use for the dummy feature.
    Returns
    -------
    X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.
    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1., 0., 1.],
    [ 1., 1., 0.]])
    """
    X = safe_asarray(X)
    n_samples, n_features = X.shape
    # Resulting shape: one extra (leading) column for the dummy feature.
    shape = (n_samples, n_features + 1)
    if sparse.issparse(X):
        if sparse.isspmatrix_coo(X):
            # Shift columns to the right.
            col = X.col + 1
            # Column indices of dummy feature are 0 everywhere.
            col = np.concatenate((np.zeros(n_samples), col))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            row = np.concatenate((np.arange(n_samples), X.row))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.coo_matrix((data, (row, col)), shape)
        elif sparse.isspmatrix_csc(X):
            # Shift index pointers since we need to add n_samples elements.
            indptr = X.indptr + n_samples
            # indptr[0] must be 0.
            indptr = np.concatenate((np.array([0]), indptr))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            indices = np.concatenate((np.arange(n_samples), X.indices))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.csc_matrix((data, indices, indptr), shape)
        else:
            # Any other sparse layout: recurse via COO (handled above) and
            # convert the result back to the original sparse class.
            klass = X.__class__
            return klass(add_dummy_feature(X.tocoo(), value))
    else:
        # Dense case: horizontally stack a constant column in front of X.
        return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = atleast2d_or_csc(X, copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = | np.sum(sel) | numpy.sum |
import pandas as pd
import numpy as np
import logging
from flarestack.analyses.ccsn.necker_2019.ccsn_helpers import \
raw_sn_catalogue_name, updated_sn_catalogue_name, sn_times, pdf_names, conservative_redshift_addition
from flarestack.analyses.ccsn.stasik_2017.shared_ccsn import sn_catalogue_name
import math
logging.getLogger().setLevel("INFO")
# columns in raw catalogues
columns = np.array(['name', 'discovery', 'ra', 'dec', 'redshift', 'lum dist [Mpc]', 'weight', 'flag'])
# columns for output catalogues (same order as `columns`, renamed to the
# flarestack naming convention).  NOTE: a dead intermediate assignment that
# copied `columns` and renamed 'lum dist [Mpc]' was removed here — it was
# immediately overwritten by this explicit list.
columns_out = np.array([
    'source_name',
    'ref_time_mjd',
    'ra_rad',
    'dec_rad',
    'redshift',
    'distance_mpc',
    'weight',
    'flag'
])
# columns to be added to every output catalogue with a constant default value
to_add = {
    'names': ['injection_weight_modifier'],
    'values': [1.],
    'formats': ['<f8']
}
columns_out = np.append(columns_out, to_add['names'])
# data type for PDF catalogues: the name is a string, the discovery date an
# integer, everything else (including the added columns) a float
dt = {
    'names': columns_out,
    'formats': ['<U50', 'i'] + ['<f8'] * 6 + to_add['formats']
}
# columns that are not to be replaced when looking for values in catalogues of different PDFs
not_to_be_replaced = ['weight']
not_to_be_replaced_inds = [np.where(columns == col_name)[0][0] for col_name in not_to_be_replaced]
keep_inds = np.delete(np.arange(len(columns)), not_to_be_replaced_inds)
keep_inds_out = np.append(keep_inds, [np.where(columns_out == col)[0][0] for col in to_add['names']])
# data type for combined catalogues (PDF-specific columns such as 'weight' dropped)
dt_comb = {
    'names': columns_out[keep_inds_out],
    'formats': np.array(dt['formats'])[keep_inds_out]
}
def load_catalogue(sn_type, pdf_name, include_flagged=False, z_add=0):
    """
    load the catalogue from the csv file
    :param sn_type: string, e.g. 'IIP'
    :param pdf_name: string, e.g. '300 day box', '0.2 decay function', 'missed_objects'
    :param include_flagged: bool, if True also objects that are flagged are included
    :param z_add: float, optional, if given, this number is added to all redshifts
    :return: np.ndarray, columns name, discovery, ra, dec, redshift, distance, weight
    :raises IndexError: if the requested PDF is not present in the raw catalogue
    :raises ValueError: if an object has more than one name entry
    """
    logging.info(f'loading the catalogue for {sn_type}: {pdf_name}')
    csv_filename = raw_sn_catalogue_name(sn_type)
    logging.info(f'filename: {csv_filename}')
    raw_catalogues = pd.read_csv(csv_filename)
    # get only the part of the catalogue that belongs to the given PDF
    col_catalogue = np.array(raw_catalogues.catalogue)
    logging.debug(f'looking for the right pdf in {col_catalogue}')
    # Rows holding a string in the 'catalogue' column mark the start of a PDF
    # section.  Renamed from `pdf_names` to avoid shadowing the imported
    # `pdf_names` helper function.
    pdf_names_mask = [type(ob) is str for ob in col_catalogue]
    available_pdf_names = col_catalogue[pdf_names_mask]
    pdf_names_inds = np.where(pdf_names_mask)[0]
    if len(np.where(available_pdf_names == pdf_name)[0]) < 1:
        raise IndexError(f'PDF {pdf_name} not available for {sn_type}')
    else:
        pdf_name_ind = np.where(available_pdf_names == pdf_name)[0][0]
    # if the last PDF in the list is selected, get everything until last row
    # (the +3 / -2 offsets skip the section header rows between PDF blocks)
    if pdf_name_ind == len(available_pdf_names) - 1:
        inds = [pdf_names_inds[pdf_name_ind]+3, None]
    else:
        inds = [pdf_names_inds[pdf_name_ind]+3, pdf_names_inds[pdf_name_ind+1]-2]
    logging.debug(f'The pdf names are stored under indices {pdf_names_inds}')
    logging.debug(f'Getting elements [{inds[0]} : {inds[1]}]')
    name_ind_in_columns = np.where(columns == 'name')[0][0]
    logging.debug(f'index of "name" in columns is {name_ind_in_columns}')
    catalogue = np.array(
        raw_catalogues.values[
            inds[0] : inds[1],
            keymap(columns, raw_catalogues)
        ]
    )
    logging.debug(f'after selecting right PDF: {catalogue}')
    # get only rows with the values, e.g. the rows where 'name' is not NaN
    objects_names_inds = []
    for i, element in enumerate(catalogue[:, name_ind_in_columns]):
        # BUG FIX: the original test `type(element) is (list or np.array)`
        # evaluated to `type(element) is list` only (`list or np.array` is
        # just `list`, and `np.array` is a function, not a type), so ndarray
        # entries were never unpacked.  Use isinstance with both types.
        if isinstance(element, (list, np.ndarray)):
            if len(element) > 1:
                raise ValueError(f'more than one name for object')
            else: element = element[0]
        if type(element) is str: objects_names_inds += [i]
    logging.debug(f'getting the rows with indices {objects_names_inds}')
    catalogue = catalogue[objects_names_inds, :]
    logging.debug(f'after removing NaNs from the name column: \n {catalogue}')
    # for empty rows, get the values from other catalogues (except weights!)
    for i, row in enumerate(catalogue):
        this_discovery = row[columns == 'discovery'][0]
        if np.isnan(this_discovery):
            logging.debug(f'discovery for row {i} is NaN')
            raw_cat_arr = raw_catalogues.values[:, keymap(columns, raw_catalogues)]
            # select only rows where the name is the name of this row
            raw_cat_arr = raw_cat_arr[raw_cat_arr[:, name_ind_in_columns] == row[name_ind_in_columns], :]
            # Take the values (everything except the weight column) from the
            # first occurrence of this object that has a valid discovery date.
            replacement = raw_cat_arr[
                np.invert(
                    np.isnan(
                        np.array(raw_cat_arr[:, np.where(columns == 'discovery')[0][0]],
                                 dtype='<f8')
                    )
                ),
                keep_inds
            ]
            logging.debug(f'replacing this row with {replacement}')
            catalogue[i][keep_inds] = replacement
    logging.debug(f'after replacing missing fields with values from other PDFs: \n {catalogue}')
    add_list = [to_add['values']] * len(catalogue)
    logging.debug(f'adding {add_list} to array')
    catalogue = np.append(catalogue, add_list, axis=1)
    logging.debug(f'after adding columns: \n {catalogue}')
    # convert strings with commas to floats (decimal comma in the raw CSV)
    for i, fmt in enumerate(dt['formats']):
        if ('f' in fmt) and (type(catalogue[0,i]) is str):
            logging.debug(f'converting {columns_out[i]}: {catalogue[:,i]} to floats')
            catalogue[:,i] = [float(st.replace(',', '.')) for st in catalogue[:, i]]
    logging.debug(f'after converting strings to floats: \n {catalogue}')
    # set the right data type to array
    catalogue = np.array(
        [tuple(row) for row in catalogue],
        dtype=dt
    )
    logging.debug(f'after setting the right data type: \n {catalogue}')
    # convert ra and dec to radians
    conversion_factor = math.pi/180
    catalogue['ra_rad'] = catalogue['ra_rad']*conversion_factor
    catalogue['dec_rad'] = catalogue['dec_rad']*conversion_factor
    logging.debug(f'after converting to radians: \n {catalogue}')
    if not include_flagged:
        # Flagged objects carry a number in 'flag'; unflagged ones are NaN.
        catalogue = catalogue[np.isnan(catalogue['flag'])]
        logging.debug(f'after removing flagged objects: \n {catalogue}')
    if z_add:
        logging.debug(f'adding {z_add} to all redshifts!')
        catalogue['redshift'] += z_add
    return catalogue
def keymap(keys, raw_catalogue):
    """Map each key in *keys* to its integer column position in *raw_catalogue*.

    :param keys: iterable of column names to look up
    :param raw_catalogue: pandas.DataFrame whose ``columns`` are searched
    :return: list of integer indices, one per key (first match wins)
    :raises IndexError: if a key does not occur in the catalogue columns

    BUG FIX: the original condition ``len(np.where(...)) <= 1`` measured the
    length of the tuple returned by ``np.where`` — always 1 for 1-D input —
    so its else-branch was dead code.  The lookup below keeps the effective
    behaviour (index of the first match) without the broken condition.
    """
    catalogue_columns = np.array(raw_catalogue.columns)
    indices = [np.where(catalogue_columns == key)[0][0] for key in keys]
    logging.debug(f'keymap gives indices {indices} for keys {keys}')
    return indices
class InconsistencyError(Exception):
    """Raised when catalogues of different PDFs disagree on a shared field.

    Calls ``super().__init__`` so that ``str(exc)`` and ``exc.args`` carry
    the message (the original implementation only set ``self.message``,
    leaving the standard exception machinery empty).
    """
    def __init__(self, msg):
        super().__init__(msg)
        self.message = msg
# ======================== #
# === execute building === #
# ======================== #
if __name__ == '__main__':
logging.info('building catalogues')
for z_add in [0, conservative_redshift_addition]:
if z_add:
logging.debug(f'adding {z_add} to all redshifts!')
for flag in [True, False]:
msg = 'Including flagged objects' if flag else 'Not including flagged objects'
logging.info(msg)
for sn_type in sn_times:
logging.info(f'building catalogue for sn type {sn_type}')
start = True
for pdf_type in sn_times[sn_type]:
logging.info(f'pdf type: {pdf_type}')
for pdf_time in sn_times[sn_type][pdf_type]:
# get catalogues for individual PDFs
pdf_name = pdf_names(pdf_type, pdf_time)
logging.info(f'pdf time: {pdf_time}')
catalogue = load_catalogue(sn_type, pdf_name, include_flagged=flag, z_add=z_add)
savename = updated_sn_catalogue_name(sn_type, pdf_name, flagged=flag, z_conservative=z_add)
np.save(savename, catalogue)
# combine with previous PDF-catalogues
catalogue_red = catalogue[columns_out[keep_inds_out]]
if start: combined_catalogue = catalogue_red
start = False
new_mask = np.invert([name in combined_catalogue['source_name']
for name in catalogue_red['source_name']])
new_objects = catalogue_red[new_mask]
combined_catalogue = np.array(list(combined_catalogue) + list(new_objects),
dtype=dt_comb)
# check consistency
# treat field 'flag' separately because difficulty with comparing NaNs in lists
special_field = 'flag'
to_check = columns_out[keep_inds_out][np.invert(columns_out[keep_inds_out] == special_field)]
for old in catalogue_red:
element_in_combined_catalogue = \
combined_catalogue[combined_catalogue['source_name'] == old['source_name']][0]
if (not np.array_equiv(old[to_check] ,element_in_combined_catalogue[to_check])) or \
not (
| np.isnan(old[special_field]) | numpy.isnan |
from typing import Type
import torch
import os
import unittest
import heat as ht
import numpy as np
from ...tests.test_suites.basic_test import TestCase
class TestLinalgBasics(TestCase):
    def test_cross(self):
        """Exercise ht.cross: result metadata, split/dtype handling, the
        axis/axisa/axisb/axisc arguments, 2-element vector axes, and the
        expected error conditions."""
        a = ht.eye(3)
        b = ht.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
        # different types
        cross = ht.cross(a, b)
        self.assertEqual(cross.shape, a.shape)
        self.assertEqual(cross.dtype, a.dtype)
        self.assertEqual(cross.split, a.split)
        self.assertEqual(cross.comm, a.comm)
        self.assertEqual(cross.device, a.device)
        self.assertTrue(ht.equal(cross, ht.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])))
        # axis
        a = ht.eye(3, split=0)
        b = ht.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]], dtype=ht.float, split=0)
        cross = ht.cross(a, b)
        self.assertEqual(cross.shape, a.shape)
        self.assertEqual(cross.dtype, a.dtype)
        self.assertEqual(cross.split, a.split)
        self.assertEqual(cross.comm, a.comm)
        self.assertEqual(cross.device, a.device)
        self.assertTrue(ht.equal(cross, ht.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])))
        # cross product along axis 0 of column-split int8 operands; the
        # expected result is the negated permutation matrix.
        a = ht.eye(3, dtype=ht.int8, split=1)
        b = ht.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]], dtype=ht.int8, split=1)
        cross = ht.cross(a, b, axis=0)
        self.assertEqual(cross.shape, a.shape)
        self.assertEqual(cross.dtype, a.dtype)
        self.assertEqual(cross.split, a.split)
        self.assertEqual(cross.comm, a.comm)
        self.assertEqual(cross.device, a.device)
        self.assertTrue(ht.equal(cross, ht.array([[0, 0, -1], [-1, 0, 0], [0, -1, 0]])))
        # test axisa, axisb, axisc — compare against the NumPy reference on
        # the same seeded random data.
        np.random.seed(42)
        np_a = np.random.randn(40, 3, 50)
        np_b = np.random.randn(3, 40, 50)
        np_cross = np.cross(np_a, np_b, axisa=1, axisb=0)
        a = ht.array(np_a, split=0)
        b = ht.array(np_b, split=1)
        cross = ht.cross(a, b, axisa=1, axisb=0)
        self.assert_array_equal(cross, np_cross)
        cross_axisc = ht.cross(a, b, axisa=1, axisb=0, axisc=1)
        np_cross_axisc = np.cross(np_a, np_b, axisa=1, axisb=0, axisc=1)
        self.assert_array_equal(cross_axisc, np_cross_axisc)
        # test vector axes with 2 elements
        b_2d = ht.array(np_b[:-1, :, :], split=1)
        cross_3d_2d = ht.cross(a, b_2d, axisa=1, axisb=0)
        np_cross_3d_2d = np.cross(np_a, np_b[:-1, :, :], axisa=1, axisb=0)
        self.assert_array_equal(cross_3d_2d, np_cross_3d_2d)
        a_2d = ht.array(np_a[:, :-1, :], split=0)
        cross_2d_3d = ht.cross(a_2d, b, axisa=1, axisb=0)
        np_cross_2d_3d = np.cross(np_a[:, :-1, :], np_b, axisa=1, axisb=0)
        self.assert_array_equal(cross_2d_3d, np_cross_2d_3d)
        cross_z_comp = ht.cross(a_2d, b_2d, axisa=1, axisb=0)
        np_cross_z_comp = np.cross(np_a[:, :-1, :], np_b[:-1, :, :], axisa=1, axisb=0)
        self.assert_array_equal(cross_z_comp, np_cross_z_comp)
        # error conditions: incompatible splits/shapes/devices and bad axis
        a_wrong_split = ht.array(np_a[:, :-1, :], split=2)
        with self.assertRaises(ValueError):
            ht.cross(a_wrong_split, b, axisa=1, axisb=0)
        with self.assertRaises(ValueError):
            ht.cross(ht.eye(3), ht.eye(4))
        with self.assertRaises(ValueError):
            ht.cross(ht.eye(3, split=0), ht.eye(3, split=1))
        if torch.cuda.is_available():
            with self.assertRaises(ValueError):
                ht.cross(ht.eye(3, device="gpu"), ht.eye(3, device="cpu"))
        with self.assertRaises(TypeError):
            ht.cross(ht.eye(3), ht.eye(3), axis="wasd")
        with self.assertRaises(ValueError):
            ht.cross(ht.eye(3, split=0), ht.eye(3, split=0), axis=0)
def test_dot(self):
# ONLY TESTING CORRECTNESS! ALL CALLS IN DOT ARE PREVIOUSLY TESTED
# cases to test:
data2d = | np.ones((10, 10)) | numpy.ones |
######################################################
# Suboptimal solver for the set/vertex cover problem (CITATION)
# O(n^3) time with O(log n) suboptimality.
# We solve a linear programming relaxation of the following problem.
# Problem:
# minimize c^T x
# subject to Ax >= e
# A is a set of sets. Each column represent a set of elements.
# e = (1, 1, ..., 1)
# x is a 0-1 vector with size of n
# Dual of the problem
# maximize e^T y
# subject to y^T A <= c
# Js = {j | y^T Aj = cj}
# Solution <- x(Js)
# Then we greedily remove a redundant element from the solution.
#######################################################
import numpy as np
from scipy.optimize import linprog
def find_cover(A, c):
# First we solve a dual of the linear relaxation.
dual_c = -1 * np.ones(A.shape[0])
dual_A = A.transpose()
dual_b = c
print(dual_A)
ret = linprog(c=dual_c, A_ub=dual_A, b_ub=dual_b)
y = ret.x
costs = np.matmul(y.transpose(), A)
print("y=", y)
print("costs=", costs)
print("c=", c)
# x(Js) will be a feasible solution with a bound of O(log n).
Js = []
x = | np.zeros(A.shape[1]) | numpy.zeros |
import numpy as np
import cv2
import open3d as o3d
from Config import Config
from matplotlib import pyplot as plt
from Optimizer import *
from Keyframe import *
from utilities import rot_to_angle, rot_to_heading
from scipy.spatial.transform import Rotation
class Tracking:
"""Track the input image with respect to previous images"""
"""fx=fy=f=imageWidth /(2 * tan(CameraFOV * π / 360))"""
    def __init__(self):
        # --- per-frame tracking state ---
        self.current_frame = None
        self.ref_keyframe = None       # keyframe the current frame is tracked against
        self.image_queue = []  # ?
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # create BFMatcher object
        # flags steering keyframe insertion within one track() call
        self.insert_new_kf = False
        self.new_kf_inserted = False
        self.tracking_success = False
        self.map = []                  # list of all Keyframe objects created so far
        # self.current_rot = np.eye(3)
        # self.current_pos = np.array([0, 0, 0])
        self.current_pose = np.eye(4)  # 4x4 homogeneous pose of the current frame
        # camera intrinsics taken from the project Config
        self.fx = Config().fx
        self.fy = Config().fy
        self.cx = Config().cx
        self.cy = Config().cy
        self.bf = Config().bf          # presumably baseline*focal for stereo — TODO confirm
        self.reprojection_threshold = 0.3  # max reprojection error [same units as obs_to_3d output]
        self.n_loop_closures = 0  # Counter for the number of verified loop closures
        # --- occupancy/uncertainty grid parameters ---
        self.grid_dim = (31, 31, 10)  # x_grid, z_grid, [x,z,heading uncertainty + 1 layer occupancy]
        self.grid_center = (self.grid_dim[0]//2, self.grid_dim[1]//2, (self.grid_dim[2]-1)//2)
        self.grid_length = 8.0
        self.grid_size = self.grid_length/self.grid_dim[0]
        self.grid_size_angle = 2*np.pi/(self.grid_dim[2]-1)  # if e.g. grid_dim[2] == 11, then 9 divisions
        # --- pose-graph optimization (GTSAM) state ---
        self.pgo = PoseGraphOptimizerGTSAM()
        self.result = None
        self.marginals = None
        self.loop_closure_inserted = False
def grab_frame(self, frame):
self.current_frame = frame
if not self.ref_keyframe:
self.ref_keyframe = Keyframe(frame)
self.ref_keyframe.key_point_initializer()
self.ref_keyframe.set_pose(np.eye(3), [0, 0, 0])
self.map.append(self.ref_keyframe)
self.result, self.marginals = self.pgo.add_node_optimize(self.ref_keyframe)
return True
else:
return self.track()
    def track(self):
        """Track the current frame against the reference keyframe and its
        neighbors; optionally promote the frame to a new keyframe, attempt
        loop closures, and run pose-graph optimization.

        Returns True if visual odometry succeeded against at least one
        keyframe."""
        n_matched = 0
        candidate_ref_keyframe = None
        new_kf = None
        # Candidate keyframes: the reference keyframe's neighbors, sorted by
        # ID with the reference itself forced to the end (processed first in
        # the reversed loop below).
        list_kf = [kf[0] for kf in self.ref_keyframe.neighbors]
        # if self.ref_keyframe not in list_kf:
        # list_kf.append(self.ref_keyframe)
        if self.ref_keyframe in list_kf:
            list_kf.remove(self.ref_keyframe)
        list_kf = sorted(list_kf, key=lambda x: x.kfID, reverse=False)
        list_kf.append(self.ref_keyframe)
        list_kf_n = len(list_kf)
        list_kf_correspondence = []
        count_success = 0
        n_max = 0
        if list_kf_n == 1:
            self.insert_new_kf = True
        for i in range(list_kf_n - 1, -1, -1):
            key_fr = list_kf[i]
            # Stop after a few successful trackings unless a freshly inserted
            # keyframe still needs more correspondences.
            if count_success <= 3 or (self.new_kf_inserted and not self.tracking_success):
                flag_success, rel_pose, matches = self.visual_odometry_teaser(self.current_frame, key_fr)
            else:
                break
            if not flag_success:
                del list_kf[i] # is this really necessary?
                if i == list_kf_n-1: # If tracking reference keyframe failed, insert new kf
                    self.insert_new_kf = True
                continue
            else: # Decide if the current frame should be converted into a new keyframe
                list_kf_correspondence.insert(0, (rel_pose, matches))
                if i == list_kf_n-1 and (len(matches) < 0.55*key_fr.n_kp or len(matches) < 300):
                    self.insert_new_kf = True
                    print("Decided to convert to kf, matches: ", len(matches), " KPs: ", key_fr.n_kp, " KFID: ",
                          key_fr.kfID)
                if len(matches) > 0.85*key_fr.n_kp:
                    self.tracking_success = True
                    print("Tracking was successful!")
                # list_kf_correspondence[i] = len(matches)
                count_success += 1
                # if i == len(list_kf) - 1:
                # Compose the keyframe pose with the relative pose from VO to
                # get the current frame's absolute pose.
                rel_rot = rel_pose[:3, :3]
                rel_trans = rel_pose[:3, 3]
                rot = key_fr.rot().dot(rel_rot) # rot = rel_rot.dot(key_fr.rot())
                trans = key_fr.pos() + key_fr.rot().dot(rel_trans) # trans = rel_trans + rel_rot.dot(key_fr.pos())
                self.current_pose[0:3, 0:3] = rot
                self.current_pose[0:3, 3] = trans
                # if self.insert_new_kf and not self.new_kf_inserted: # Convert current frame into new kf
                if self.insert_new_kf and not self.new_kf_inserted: # Convert current frame into new kf
                    self.new_kf_inserted = True
                    new_kf = Keyframe(self.current_frame) # Now add key points to the new kf
                    self.map.append(new_kf)
                    new_kf.set_pose(rot, trans)
                if self.new_kf_inserted:
                    # Transfer matched key points to the new keyframe; create
                    # missing ones by back-projecting into world coordinates.
                    for p in matches:
                        idx_kf = p.queryIdx
                        idx_cf = p.trainIdx
                        kp = key_fr.get_key_point(idx_kf)
                        if kp:
                            new_kf.add_key_point(idx_cf, kp)
                        else:
                            d = key_fr.depth[idx_kf]
                            pos = np.array([(key_fr.fp[idx_kf].pt[0]-self.cx)/self.fx*d,
                                            (key_fr.fp[idx_kf].pt[1]-self.cy)/self.fy*d, d])
                            pos = key_fr.rot().dot(pos) + key_fr.pos()
                            kp = key_fr.new_key_point(idx_kf, pos)
                            new_kf.add_key_point(idx_cf, kp)
                    print("New KF initialized: <", new_kf.kfID, "> ", len(new_kf.key_points))
                    # Link the new keyframe into the covisibility graph.
                    score = Keyframe.kfdb.score_l1(new_kf.bow, key_fr.bow, normalize=True)
                    key_fr.neighbors.append((new_kf, rel_pose, score))
                    new_kf.neighbors.append((key_fr, np.linalg.inv(rel_pose), score))
                # Change the reference kf to the one with max correspondence
                n_matched = len(matches)
                if n_matched > n_max:
                    candidate_ref_keyframe = key_fr
                    n_max = n_matched
        if self.new_kf_inserted:
            # self.ref_keyframe = new_kf
            print("New KF Neighbors: ", [kf[0].kfID for kf in new_kf.neighbors])
        if candidate_ref_keyframe:
            self.ref_keyframe = candidate_ref_keyframe
            print("REF KF: ", self.ref_keyframe.kfID, " keypoints: ", len(self.ref_keyframe.key_points),
                  " Neighbors: ", [kf_[0].kfID for kf_ in self.ref_keyframe.neighbors])
        else:
            print("Number of matched features: ", n_matched)
        # Check BOW vectors for loop closure detection
        if self.new_kf_inserted:
            list_candidates = Keyframe.kfdb.get_candidates(new_kf)
            print("THE LIST OF CANDIDATES FOR LOOP CLOSURE: ", [kf.kfID for kf in list_candidates])
            for kf in list_candidates:
                self.loop_closure_teaser(new_kf, kf)
        if self.new_kf_inserted:
            # A new node went into the pose graph: optimize and cache results.
            self.result, self.marginals = self.pgo.add_node_optimize(new_kf)
        # Reset the per-call flags for the next frame.
        self.insert_new_kf = False
        self.new_kf_inserted = False
        self.tracking_success = False
        return count_success > 0
    def visual_odometry_teaser(self, current_f, key_f):
        """Estimate the relative pose between `current_f` and keyframe
        `key_f` by matching ORB-style descriptors and registering the
        back-projected 3-D point sets with the TEASER pose optimizer.

        Returns (success, 4x4 relative pose or None, inlier matches).
        NOTE(review): match.queryIdx entries are rewritten in place to
        keyframe key-point indices — callers see the mutated matches."""
        flag_reproject = True
        # kf_des = key_f.des
        # Fetch descriptors from the keypoints of key_frame
        kf_des = np.zeros([key_f.n_kp, 32], dtype=np.uint8)
        kf_kp_indices = []
        for idx, kp_idx in enumerate(key_f.key_points):
            kf_des[idx, :] = key_f.key_points[kp_idx].des
            kf_kp_indices.append(kp_idx)
        # Match keypoint descriptors with the features of the current frame
        matches = self.matcher.match(kf_des, current_f.des)
        # matches = sorted(matches, key=lambda x: x.distance) # Sort them in the order of their distance.
        if len(matches) < 30:
            print("VO failed due to lack of feature matches: ", len(matches))
            return False, None, []
        # if len(matches) < key_f.n_kp * 0.55 or len(matches) < 300:
        # self.insert_new_kf = True
        # print("Decision to convert to kf, matches: ", len(matches), " KPs: ", key_f.n_kp, " KFID: ", key_f.kfID)
        # Back-project every match into 3-D in both frames (columns = points).
        src = np.zeros((3, len(matches)), dtype=float)
        dst = np.zeros((3, len(matches)), dtype=float)
        for idx, p in enumerate(matches):
            # Remap the match index from descriptor-row to key-point id.
            p.queryIdx = kf_kp_indices[p.queryIdx]
            src[:, idx] = self.obs_to_3d(current_f.fp[p.trainIdx].pt[0], current_f.fp[p.trainIdx].pt[1],
                                         current_f.depth[p.trainIdx])
            dst[:, idx] = self.obs_to_3d(key_f.fp[p.queryIdx].pt[0], key_f.fp[p.queryIdx].pt[1],
                                         key_f.depth[p.queryIdx])
        optim = PoseOptimizerTeaser()
        pose = optim.optimize(src, dst)
        rot = pose[0:3, 0:3]
        trans = pose[0:3, 3]
        # Classify matches by 3-D registration residual (threshold 2).
        edge_outlier = []
        for idx, p in enumerate(matches):
            pf = self.obs_to_3d(current_f.fp[p.trainIdx].pt[0], current_f.fp[p.trainIdx].pt[1],
                                current_f.depth[p.trainIdx])
            pkf = self.obs_to_3d(key_f.fp[p.queryIdx].pt[0], key_f.fp[p.queryIdx].pt[1], key_f.depth[p.queryIdx])
            error = pkf - rot.dot(pf) - trans
            # print(np.linalg.norm(error))
            if np.linalg.norm(error) < 2:
                edge_outlier.append(False)
            else:
                edge_outlier.append(True)
        # Sanity checks: implausibly large translation or too few inliers.
        if np.linalg.norm(pose[0:3, 3]) > 2:
            print("VO failed due to bad translation: ", np.linalg.norm(pose[0:3, 3]), " matches: ", len(matches))
            return False, None, []
        elif len(edge_outlier) - np.sum(edge_outlier) < 60:
            print("VO failed due to lack of enough matches: ", len(edge_outlier) - np.sum(edge_outlier))
            return False, None, []
        matches_inlier = [p for idx, p in enumerate(matches) if not edge_outlier[idx]]
        if flag_reproject:
            # Try to grow the inlier set by reprojecting the remaining
            # (unmatched) features under the estimated pose.
            fp_inliers_idx_kf = [p.queryIdx for p in matches_inlier]
            fp_inliers_idx_f = [p.trainIdx for p in matches_inlier]
            new_matches = self.reproject_features(current_f, key_f, pose,
                                                  fp_inliers_idx_f, fp_inliers_idx_kf)
            matches_inlier.extend(new_matches)
        print("VO succeeded, init. inliers: ", len(edge_outlier) - np.sum(edge_outlier))
        return True, pose, matches_inlier
def reproject_features(self, current_f, key_f, pose, fp_inliers_idx_f, fp_inliers_idx_kf):
rot = pose[0:3, 0:3]
trans = pose[0:3, 3]
n_inliers = len(fp_inliers_idx_kf)
# assert len(fp_inliers_idx_kf) == len(fp_inliers_idx_f)
if len(key_f.fp)-n_inliers < 50 or len(current_f.fp)-n_inliers < 50:
return []
kf_des = np.empty([len(key_f.fp)-n_inliers, 32], dtype=np.uint8)
f_des = np.empty([len(current_f.fp)-n_inliers, 32], dtype=np.uint8)
kf_indices = []
f_indices = []
counter = 0
for idx, fp in enumerate(current_f.fp):
if idx in fp_inliers_idx_f:
continue
f_des[counter, :] = current_f.des[idx, :]
counter += 1
f_indices.append(idx)
counter = 0
for idx, fp in enumerate(key_f.fp):
if idx in fp_inliers_idx_kf:
continue
kf_des[counter, :] = key_f.des[idx, :]
counter += 1
kf_indices.append(idx)
matches = self.matcher.match(kf_des, f_des)
n_reprojected = 0
new_matches = []
for p in matches:
p.queryIdx = kf_indices[p.queryIdx]
p.trainIdx = f_indices[p.trainIdx]
dkf = key_f.depth[p.queryIdx]
df = current_f.depth[p.trainIdx]
pkf = self.obs_to_3d(key_f.fp[p.queryIdx].pt[0], key_f.fp[p.queryIdx].pt[1], dkf)
pf = self.obs_to_3d(current_f.fp[p.trainIdx].pt[0], current_f.fp[p.trainIdx].pt[1]
, df)
error = pkf - rot.dot(pf) - trans
# print(np.linalg.norm(error))
if np.linalg.norm(error) < self.reprojection_threshold:
n_reprojected += 1
kp = key_f.new_key_point(p.queryIdx, pkf)
key_f.add_key_point(p.queryIdx, kp)
new_matches.append(p)
print(n_reprojected, " new keypoints created on kf ", key_f.kfID, "from tracking frame", current_f.id)
return new_matches
def obs_to_3d(self, u, v, d):
return np.array([(u - self.cx) / self.fx * d, (v - self.cy) / self.fy * d, d])
def obs_to_stereo(self, u, v, d):
return np.array([u, u - self.bf / d, v])
def loop_closure_teaser(self, new_kf, loop_kf):
# If temporal gap is small, disregard this candidate
if new_kf.kfID - loop_kf.kfID < 10:
return
# Calculate the similarity score and compare with neighbors
new_score = Keyframe.kfdb.score_l1(new_kf.bow, loop_kf.bow, normalize=True)
candidate_kf = loop_kf
for kf, _, _ in loop_kf.neighbors:
neighbor_score = Keyframe.kfdb.score_l1(new_kf.bow, kf.bow, normalize=True)
if neighbor_score > new_score:
new_score = neighbor_score
candidate_kf = kf
loop_kf = candidate_kf
if loop_kf in new_kf.neighbors_list():
return
min_score = 1
for _, _, score in loop_kf.neighbors:
min_score = min(min_score, score)
if min_score > new_score:
return
# If the absolute positions and orientations are not close enough, return
# if np.linalg.norm(new_kf.pos()-loop_kf.pos()) > 2 or \
# rot_to_angle(np.matmul(new_kf.rot(), loop_kf.rot().T)) > 20/180*np.pi*np.sqrt(2):
# return
# Find matches, and return if few matches
matches = self.matcher.match(loop_kf.des, new_kf.des)
if len(matches) < 100:
return
src = np.zeros((3, len(matches)), dtype=float)
dst = np.zeros((3, len(matches)), dtype=float)
for idx, p in enumerate(matches):
src[:, idx] = self.obs_to_3d(new_kf.fp[p.trainIdx].pt[0], new_kf.fp[p.trainIdx].pt[1],
new_kf.depth[p.trainIdx])
dst[:, idx] = self.obs_to_3d(loop_kf.fp[p.queryIdx].pt[0], loop_kf.fp[p.queryIdx].pt[1],
loop_kf.depth[p.queryIdx])
optim = PoseOptimizerTeaser()
pose = optim.optimize(src, dst)
rot = pose[0:3, 0:3]
trans = pose[0:3, 3]
errors = dst-rot.dot(src)-trans.reshape((3, 1))
outliers = []
n_outliers = 0
n_inliers = 0
for idx in range(len(matches)):
if | np.linalg.norm(errors[:, idx]) | numpy.linalg.norm |
# -*- coding: utf-8 -*-
"""Orientation models."""
import numpy as np
from .closures import compute_closure
def jeffery_ode(a, t, xi, L, closure="IBOF", **kwargs):
    """ODE describing Jeffery's model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>
       'The motion of ellipsoidal particles immersed in a viscous fluid',
       Proceedings of the Royal Society A, 1922.
       https://doi.org/10.1098/rspa.1922.0078
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the (possibly expensive) velocity-gradient callable only once
    # instead of once per occurrence.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    dadt = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
    )
    return dadt.ravel()
def folgar_tucker_ode(a, t, xi, L, Ci=0.0, closure="IBOF", **kwargs):
    """ODE describing the Folgar-Tucker model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.1).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>, <NAME> III,
       'Orientation behavior of fibers in concentrated suspensions',
       Journal of Reinforced Plastic Composites 3, 98-119, 1984.
       https://doi.org/10.1177%2F073168448400300201
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    delta = np.eye(3)
    dadt = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
        # Isotropic rotary diffusion scaled by the shear rate.
        + 2 * Ci * G * (delta - 3 * a)
    )
    return dadt.ravel()
def maier_saupe_ode(a, t, xi, L, Ci=0.0, U0=0.0, closure="IBOF", **kwargs):
    """ODE using Folgar-Tucker constant and Maier-Saupe potential.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.1).
    U0 : float
        Maier-Saupe Potential (in 3D stable for y U0 < 8 Ci).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>,
       'Comparative numerical study of two concentrated fiber suspension models',
       Journal of Non-Newtonian Fluid Mechanics 165, 764-781, 2010.
       https://doi.org/10.1016/j.jnnfm.2010.04.001
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    delta = np.eye(3)
    dadt = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
        # Folgar-Tucker diffusion plus the Maier-Saupe interaction potential.
        + 2
        * G
        * (
            Ci * (delta - 3 * a)
            + U0
            * (np.einsum("ik,kj->ij", a, a) - np.einsum("ijkl,kl->ij", A, a))
        )
    )
    return dadt.ravel()
def iard_ode(a, t, xi, L, Ci=0.0, Cm=0.0, closure="IBOF", **kwargs):
    """ODE describing iARD model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.05).
    Cm : float
        Anisotropy factor (0 < Cm < 1).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>; <NAME>; <NAME>,
       'An objective tensor to predict anisotropic fiber orientation in concentrated suspensions',
       Journal of Rheology 60, 215, 2016.
       https://doi.org/10.1122/1.4939098
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    delta = np.eye(3)
    dadt_HD = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
    )
    # Anisotropic rotary-diffusion contribution. When the deformation rate
    # vanishes, D2 / ||D2|| is 0/0; the whole (G-scaled) diffusion term then
    # vanishes as well, so guard instead of propagating NaNs (same spirit as
    # the G > 0 guard in ard_rsc_ode).
    D2 = np.einsum("ik,kj->ij", D, D)
    D2_norm = np.sqrt(1.0 / 2.0 * np.einsum("ij,ij", D2, D2))
    if D2_norm > 0.0:
        Dr = Ci * (delta - Cm * D2 / D2_norm)
        dadt_iard = G * (
            2 * Dr
            - 2 * np.trace(Dr) * a
            - 5 * np.einsum("ik,kj->ij", Dr, a)
            - 5 * np.einsum("ik,kj->ij", a, Dr)
            + 10 * np.einsum("ijkl,kl->ij", A, Dr)
        )
    else:
        dadt_iard = np.zeros((3, 3))
    dadt = dadt_HD + dadt_iard
    return dadt.ravel()
def iardrpr_ode(
    a, t, xi, L, Ci=0.0, Cm=0.0, alpha=0.0, beta=0.0, closure="IBOF", **kwargs
):
    """ODE describing iARD-RPR model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.05).
    Cm : float
        Anisotropy factor (0 < Cm < 1).
    alpha : float
        Retardance rate (0 < alpha < 1).
    beta : float
        Retardance tuning factor (0< beta < 1).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>; <NAME>; <NAME>,
       'An objective tensor to predict anisotropic fiber orientation in concentrated suspensions',
       Journal of Rheology 60, 215, 2016.
       https://doi.org/10.1122/1.4939098
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    delta = np.eye(3)
    dadt_HD = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2.0 * np.einsum("ijkl,kl->ij", A, D)
        )
    )
    # Anisotropic rotary-diffusion contribution. For a vanishing deformation
    # rate D2 / ||D2|| is 0/0; the whole (G-scaled) term then vanishes, so
    # guard instead of propagating NaNs (as ard_rsc_ode does for G).
    D2 = np.einsum("ik,kj->ij", D, D)
    D2_norm = np.sqrt(1.0 / 2.0 * np.einsum("ij,ij", D2, D2))
    if D2_norm > 0.0:
        Dr = Ci * (delta - Cm * D2 / D2_norm)
        dadt_iard = G * (
            2.0 * Dr
            - 2.0 * np.trace(Dr) * a
            - 5.0 * np.einsum("ik,kj->ij", Dr, a)
            - 5.0 * np.einsum("ik,kj->ij", a, Dr)
            + 10.0 * np.einsum("ijkl,kl->ij", A, Dr)
        )
    else:
        dadt_iard = np.zeros((3, 3))
    dadt_temp = dadt_HD + dadt_iard
    # Spectral decomposition of a, eigenvalues sorted in descending order.
    # NOTE(review): a is symmetric, so np.linalg.eigh would be the natural
    # choice; eig is kept to preserve the original numerical behavior.
    eigenValues, eigenVectors = np.linalg.eig(a)
    idx = eigenValues.argsort()[::-1]
    R = eigenVectors[:, idx]
    # Rotate the rate into the principal frame to read off eigenvalue rates.
    dadt_diag = np.einsum("ik, kl, lj->ij", np.transpose(R), dadt_temp, R)
    lbd0 = dadt_diag[0, 0]
    lbd1 = dadt_diag[1, 1]
    lbd2 = dadt_diag[2, 2]
    # Retarding principal rate (RPR) tensor, applied in the principal frame
    # and rotated back.
    IOK = np.zeros((3, 3))
    IOK[0, 0] = alpha * (lbd0 - beta * (lbd0 ** 2.0 + 2.0 * lbd1 * lbd2))
    IOK[1, 1] = alpha * (lbd1 - beta * (lbd1 ** 2.0 + 2.0 * lbd0 * lbd2))
    IOK[2, 2] = alpha * (lbd2 - beta * (lbd2 ** 2.0 + 2.0 * lbd0 * lbd1))
    dadt_rpr = -np.einsum("ik, kl, lj->ij", R, IOK, np.transpose(R))
    dadt = dadt_temp + dadt_rpr
    return dadt.ravel()
def mrd_ode(
    a, t, xi, L, Ci=0.0, D1=1.0, D2=0.8, D3=0.15, closure="IBOF", **kwargs
):
    """ODE describing MRD model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.05).
    D1 : float
        Anisotropy factors (D1 > 0).
    D2 : float
        Anisotropy factors (D2 > 0).
    D3 : float
        Anisotropy factors (D3 > 0).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME> and <NAME>,
       'Using New Anisotropic Rotational Diffusion Model To Improve Prediction Of Short
       Fibers in Thermoplastic InjectionMolding',
       ANTEC, Orlando, 2018.
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    C_hat = np.array([[D1, 0.0, 0.0], [0.0, D2, 0.0], [0.0, 0.0, D3]])
    # Spectral decomposition of a; the diffusion tensor is anisotropic in the
    # principal frame of the orientation tensor.
    eigenValues, eigenVectors = np.linalg.eig(a)
    idx = eigenValues.argsort()[::-1]
    R = eigenVectors[:, idx]
    C = Ci * np.einsum("ij,jk,kl->il", R, C_hat, np.transpose(R))
    dadt_HD = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
    )
    dadt_mrd = G * (
        2 * C
        - 2 * np.trace(C) * a
        - 5 * np.einsum("ik,kj->ij", C, a)
        - 5 * np.einsum("ik,kj->ij", a, C)
        + 10 * np.einsum("ijkl,kl->ij", A, C)
    )
    dadt = dadt_HD + dadt_mrd
    return dadt.ravel()
def pard_ode(a, t, xi, L, Ci=0.0, Omega=0.0, closure="IBOF", **kwargs):
    """ODE describing pARD model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.05).
    Omega : float
        Anisotropy factor (0.5 < Omega < 1).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>; <NAME>; <NAME>,
       'The use of principal spatial tensor to predict anisotropic fiber orientation in
       concentrated fiber suspensions',
       Journal of Rheology 62, 313, 2017.
       https://doi.org/10.1122/1.4998520
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    C_hat = np.array(
        [[1.0, 0.0, 0.0], [0.0, Omega, 0.0], [0.0, 0.0, 1.0 - Omega]]
    )
    # Spectral decomposition of a; the diffusion tensor is anisotropic in the
    # principal frame of the orientation tensor.
    eigenValues, eigenVectors = np.linalg.eig(a)
    idx = eigenValues.argsort()[::-1]
    R = eigenVectors[:, idx]
    C = Ci * np.einsum("ij,jk,kl->il", R, C_hat, np.transpose(R))
    dadt_HD = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
    )
    dadt_pard = G * (
        2 * C
        - 2 * np.trace(C) * a
        - 5 * np.einsum("ik,kj->ij", C, a)
        - 5 * np.einsum("ik,kj->ij", a, C)
        + 10 * np.einsum("ijkl,kl->ij", A, C)
    )
    dadt = dadt_HD + dadt_pard
    return dadt.ravel()
def pardrpr_ode(
    a, t, xi, L, Ci=0.0, Omega=0.0, alpha=0.0, closure="IBOF", **kwargs
):
    """ODE describing pARD-RPR model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.05).
    Omega : float
        Anisotropy factor (0.5 < Omega < 1).
    alpha : float
        Retardance rate (0 < alpha < 1).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>; <NAME>; <NAME>,
       'The use of principal spatial tensor to predict anisotropic fiber orientation in
       concentrated fiber suspensions',
       Journal of Rheology 62, 313, 2017.
       https://doi.org/10.1122/1.4998520
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    C_hat = np.array(
        [[1.0, 0.0, 0.0], [0.0, Omega, 0.0], [0.0, 0.0, 1.0 - Omega]]
    )
    # Spectral decomposition of a, eigenvalues sorted in descending order.
    eigenValues, eigenVectors = np.linalg.eig(a)
    idx = eigenValues.argsort()[::-1]
    R = eigenVectors[:, idx]
    C = Ci * np.einsum("ij,jk,kl->il", R, C_hat, np.transpose(R))
    dadt_HD = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", A, D)
        )
    )
    dadt_pard = G * (
        2 * C
        - 2 * np.trace(C) * a
        - 5 * np.einsum("ik,kj->ij", C, a)
        - 5 * np.einsum("ik,kj->ij", a, C)
        + 10 * np.einsum("ijkl,kl->ij", A, C)
    )
    dadt_temp = dadt_HD + dadt_pard
    # Eigenvalue rates in the principal frame of a.
    dadt_diag = np.einsum("ik, kl, lj->ij", np.transpose(R), dadt_temp, R)
    lbd0 = dadt_diag[0, 0]
    lbd1 = dadt_diag[1, 1]
    lbd2 = dadt_diag[2, 2]
    # Retarding principal rate (RPR) tensor, rotated back afterwards.
    IOK = np.zeros((3, 3))
    IOK[0, 0] = alpha * lbd0
    IOK[1, 1] = alpha * lbd1
    IOK[2, 2] = alpha * lbd2
    dadt_rpr = -np.einsum("ik, kl, lj->ij", R, IOK, np.transpose(R))
    dadt = dadt_temp + dadt_rpr
    return dadt.ravel()
def rsc_ode(a, t, xi, L, Ci=0.0, kappa=1.0, closure="IBOF", **kwargs):
    """ODE describing RSC model.

    Parameters
    ----------
    a : 9x1 numpy array
        Flattened fiber orientation tensor.
    t : float
        Time of evaluation.
    xi : float
        Shape factor computed from aspect ratio.
    L : function handle
        Function to compute velocity gradient at time t.
    Ci : float
        Fiber interaction constant (typically 0 < Ci < 0.05).
    kappa : float
        Strain reduction factor (0 < kappa < 1).
    closure: str
        Name of closure to be used.

    Returns
    -------
    9x1 numpy array
        Orientation tensor rate.

    References
    ----------
    .. [1] <NAME>, <NAME>, and <NAME>,
       'An objective model for slow orientation kinetics in concentrated fiber suspensions:
       Theory and rheological evidence',
       Journal of Rheology 52, 1179, 2008.
       https://doi.org/10.1122/1.2946437
    """
    a = np.reshape(a, (3, 3))
    A = compute_closure(a, closure)
    # Evaluate the velocity-gradient callable only once.
    Lt = L(t)
    D = 0.5 * (Lt + np.transpose(Lt))  # rate-of-deformation (symmetric part)
    W = 0.5 * (Lt - np.transpose(Lt))  # vorticity (skew-symmetric part)
    G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))  # scalar shear rate
    delta = np.eye(3)
    w, v = np.linalg.eig(a)
    # Fourth-order tensors built from the eigensystem of a. Named L4/M4 so
    # the local does not shadow the velocity-gradient argument ``L``.
    L4 = (
        w[0] * np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
        + w[1] * np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
        + w[2] * np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
    )
    M4 = (
        np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
        + np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
        + np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
    )
    # Closure modified by the strain-reduction factor kappa.
    tensor4 = A + (1.0 - kappa) * (L4 - np.einsum("ijmn,mnkl->ijkl", M4, A))
    dadt = (
        np.einsum("ik,kj->ij", W, a)
        - np.einsum("ik,kj->ij", a, W)
        + xi
        * (
            np.einsum("ik,kj->ij", D, a)
            + np.einsum("ik,kj->ij", a, D)
            - 2 * np.einsum("ijkl,kl->ij", tensor4, D)
        )
        + 2 * kappa * Ci * G * (delta - 3 * a)
    )
    return dadt.ravel()
def ard_rsc_ode(
a,
t,
xi,
L,
b1=0.0,
kappa=1.0,
b2=0,
b3=0,
b4=0,
b5=0,
closure="IBOF",
**kwargs
):
"""ODE describing ARD-RSC model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
b1 : float
First parameter of rotary diffusion tensor (0 < b1 < 0.1).
kappa : float
Strain reduction factor (0 < kappa < 1).
b2 : type
Second parameter of rotary diffusion tensor.
b3 : type
Third parameter of rotary diffusion tensor.
b4 : type
Fourth parameter of rotary diffusion tensor.
b5 : type
Fith parameter of rotary diffusion tensor.
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME>,
'An anisotropic rotary diffusion model for fiber orientation in short- and
long-fiber thermoplastics',
Journal of Non-Newtonian Fluid Mechanics 156, 165-176, 2009.
https://doi.org/10.1016/j.jnnfm.2008.08.002
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
w, v = np.linalg.eig(a)
L = (
w[0] * np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
+ w[1] * np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
+ w[2] * np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
)
M = (
np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
+ np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
+ np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
)
if G > 0.0:
C = (
b1 * delta
+ b2 * a
+ b3 * np.einsum("ik,kj->ij", a, a)
+ b4 * D / G
+ b5 * np.einsum("ik,kj->ij", D, D) / (G * G)
)
else:
C = np.eye(3)
tensor4 = A + (1.0 - kappa) * (L - np.einsum("ijmn,mnkl->ijkl", M, A))
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", tensor4, D)
)
+ G
* (
2 * (C - (1 - kappa) * np.einsum("ijkl,kl->ij", M, C))
- 2 * kappa * np.trace(C) * a
- 5 * ( | np.einsum("ik,kj->ij", C, a) | numpy.einsum |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for intersecting B |eacute| zier curves via geometric methods.
The functions here are pure Python and many have equivalent implementations
written in Fortran and exposed via a Cython wrapper.
.. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE
:trim:
"""
import itertools
import numpy as np
from bezier.hazmat import curve_helpers
from bezier.hazmat import helpers as _py_helpers
from bezier.hazmat import intersection_helpers
# Set the threshold for exponent at half the bits available, this way one round
# of Newton's method can (usually) finish the job by squaring the error.
_ERROR_VAL = 0.5 ** 26
# Cap on subdivision rounds; presumably paired with ``_NO_CONVERGE_TEMPLATE``
# below -- the consuming code is outside this excerpt.
_MAX_INTERSECT_SUBDIVISIONS = 20
# Cap on candidate intersection pairs; presumably paired with
# ``_TOO_MANY_TEMPLATE`` below.
_MAX_CANDIDATES = 64
# Message for the line/line case, which callers are expected to resolve
# before reaching the geometric machinery here.
_UNHANDLED_LINES = (
    "If both curves are lines, the intersection should have "
    "been computed already."
)
# Error template formatted with the number of candidate pairs.
_TOO_MANY_TEMPLATE = (
    "The number of candidate intersections is too high.\n"
    "{:d} candidate pairs."
)
# Error template formatted with the number of subdivision iterations.
_NO_CONVERGE_TEMPLATE = (
    "Curve intersection failed to converge to approximately linear "
    "subdivisions after {:d} iterations."
)
# Narrowest parameter interval considered meaningful (2^-40); usage is
# outside this excerpt.
_MIN_INTERVAL_WIDTH = 0.5 ** 40
def bbox_intersect(nodes1, nodes2):
    r"""Bounding box intersection predicate.

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    Determines if the axis-aligned bounding boxes of two sets of control
    points intersect in :math:`\mathbf{R}^2` with non-trivial intersection
    (i.e. tangent bounding boxes are insufficient).

    .. note::

       Though we assume (and the code relies on this fact) that
       the nodes are two-dimensional, we don't check it.

    Args:
        nodes1 (numpy.ndarray): Set of control points for a
            B |eacute| zier shape.
        nodes2 (numpy.ndarray): Set of control points for a
            B |eacute| zier shape.

    Returns:
        int: Enum from :class:`.BoxIntersectionType` indicating the type of
        bounding box intersection.
    """
    left1, right1, bottom1, top1 = _py_helpers.bbox(nodes1)
    left2, right2, bottom2, top2 = _py_helpers.bbox(nodes2)
    # Strict separation along either axis means the boxes cannot meet.
    if right2 < left1 or right1 < left2:
        return BoxIntersectionType.DISJOINT
    if top2 < bottom1 or top1 < bottom2:
        return BoxIntersectionType.DISJOINT
    # Boxes that share only an edge (or corner) are tangent, not intersecting.
    touching = (
        right2 == left1
        or right1 == left2
        or top2 == bottom1
        or top1 == bottom2
    )
    if touching:
        return BoxIntersectionType.TANGENT
    return BoxIntersectionType.INTERSECTION
def linearization_error(nodes):
    r"""Compute the maximum error of a linear approximation.

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    .. note::

       This is a helper for :class:`.Linearization`, which is used during the
       curve-curve intersection process.

    Approximates the curve by the chord

    .. math::

       L(s) = v_0 (1 - s) + v_n s

    and bounds

    .. math::

       \max_{s \in \left[0, 1\right]} \|B(s) - L(s)\|_2

    via the Lagrange-interpolation remainder: :math:`\frac{s(s - 1)}{2!}`
    times the second derivative in each component. The second derivative
    curve is a convex combination of the (scaled) second forward differences
    :math:`n(n - 1) \Delta^2 v_j`, so each of its components is bounded by
    the maximum of that component among the :math:`\Delta^2 v_j`. For
    example, for

    .. math::

       B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^2
           + \left[\begin{array}{c} 3 \\ 1 \end{array}\right] 2s(1 - s)
           + \left[\begin{array}{c} 9 \\ -2 \end{array}\right] s^2

    the second derivative is constant with norm :math:`10`, so the bound is
    :math:`\frac{5}{4}`:

    .. doctest:: linearization-error

       >>> nodes = np.asfortranarray([
       ...     [0.0, 3.0, 9.0],
       ...     [0.0, 1.0, -2.0],
       ... ])
       >>> linearization_error(nodes)
       1.25

    Note that this is an upper bound on the error of the **parametrization**,
    not of the underlying algebraic curve: a cubic whose control points lie
    on a line but are non-uniformly spaced still reports a nonzero error.

    .. note::

       It may be more appropriate to use a **relative** linearization error
       rather than the **absolute** error provided here. It's unclear if
       the domain :math:`\left[0, 1\right]` means the error is **already**
       adequately scaled or if the error should be scaled by the arc
       length of the curve or the (easier-to-compute) length of the line.

    Args:
        nodes (numpy.ndarray): Nodes of a curve.

    Returns:
        float: The maximum error between the curve and the
        linear approximation.
    """
    num_nodes = nodes.shape[1]
    degree = num_nodes - 1
    if degree == 1:
        # A line is its own linearization.
        return 0.0
    # Second forward differences of the control points.
    second_deriv = nodes[:, :-2] - 2.0 * nodes[:, 1:-1] + nodes[:, 2:]
    componentwise_max = np.abs(second_deriv).max(axis=1)
    # max_{0 <= s <= 1} s(1 - s)/2 = 1/8 = 0.125
    scale = 0.125 * degree * (degree - 1)
    # ``componentwise_max`` is 1D, so this is the Euclidean vector norm.
    return scale * np.linalg.norm(componentwise_max, ord=2)
def segment_intersection(start0, end0, start1, end1):
    r"""Determine the intersection of two line segments.

    Assumes each line is parametric

    .. math::

       \begin{alignat*}{2}
        L_0(s) &= S_0 (1 - s) + E_0 s &&= S_0 + s \Delta_0 \\
        L_1(t) &= S_1 (1 - t) + E_1 t &&= S_1 + t \Delta_1.
       \end{alignat*}

    Solving :math:`S_0 + s \Delta_0 = S_1 + t \Delta_1` with the (2D scalar)
    cross product gives

    .. math::

       s \left(\Delta_0 \times \Delta_1\right) =
           \left(S_1 - S_0\right) \times \Delta_1
       \qquad
       t \left(\Delta_0 \times \Delta_1\right) =
           \left(S_1 - S_0\right) \times \Delta_0.

    .. note::

       Since our points are in :math:`\mathbf{R}^2`, the "traditional"
       cross product in :math:`\mathbf{R}^3` will always point in the
       :math:`z` direction, so in the above we mean the :math:`z`
       component of the cross product, rather than the entire vector.

    When :math:`\Delta_0 \times \Delta_1 = 0` the lines are parallel and no
    unique solution exists; this function reports failure and callers
    should fall back to :func:`parallel_lines_parameters`.

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    Args:
        start0 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
            vector :math:`S_0` of the parametric line :math:`L_0(s)`.
        end0 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
            vector :math:`E_0` of the parametric line :math:`L_0(s)`.
        start1 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
            vector :math:`S_1` of the parametric line :math:`L_1(s)`.
        end1 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
            vector :math:`E_1` of the parametric line :math:`L_1(s)`.

    Returns:
        Tuple[float, float, bool]: Pair of :math:`s_{\ast}` and
        :math:`t_{\ast}` such that the lines intersect:
        :math:`L_0\left(s_{\ast}\right) = L_1\left(t_{\ast}\right)` and then
        a boolean indicating if an intersection was found (i.e. if the lines
        aren't parallel).
    """
    delta0 = end0 - start0
    delta1 = end1 - start1
    denominator = _py_helpers.cross_product(delta0, delta1)
    if denominator == 0.0:
        # Parallel (or degenerate) segments: no unique solution.
        return None, None, False
    start_delta = start1 - start0
    s = _py_helpers.cross_product(start_delta, delta1) / denominator
    t = _py_helpers.cross_product(start_delta, delta0) / denominator
    return s, t, True
def parallel_lines_parameters(start0, end0, start1, end1):
    r"""Compute the shared interval (if any) of two parallel line segments.

    Meant as a back-up when :func:`segment_intersection` fails; this
    assumes, but never verifies, that the two lines are parallel.

    Segments lying on **different** parallel lines are guaranteed not to
    intersect. Segments on the exact same line may share a coincident
    sub-segment. Whether :math:`S_1` lies on the first line is decided via
    the normal form (cross product) of the line:
    :math:`S_0 \times \Delta_0 \stackrel{?}{=} S_1 \times \Delta_0`.
    If it does, :math:`S_1` and :math:`E_1` are projected onto the first
    line to obtain parameters

    .. math::

       s_{\ast} = \frac{\Delta_0^T \left(
           L_1(t) - S_0\right)}{\Delta_0^T \Delta_0}

    and the resulting interval is intersected with
    :math:`\left[0, 1\right]`.

    .. note::

       This function doesn't currently allow wiggle room around the
       desired value, i.e. the two cross products must be bitwise
       identical. However, the most "correct" version of this function
       likely should allow for some round off.

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    Args:
        start0 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
            vector :math:`S_0` of the parametric line :math:`L_0(s)`.
        end0 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
            vector :math:`E_0` of the parametric line :math:`L_0(s)`.
        start1 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
            vector :math:`S_1` of the parametric line :math:`L_1(t)`.
        end1 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
            vector :math:`E_1` of the parametric line :math:`L_1(t)`.

    Returns:
        Tuple[bool, Optional[numpy.ndarray]]: A pair of

        * Flag indicating if the lines are disjoint.
        * An optional ``2 x 2`` matrix of ``s-t`` parameters only present if
          the lines aren't disjoint. The first column will contain the
          parameters at the beginning of the shared segment and the second
          column will correspond to the end of the shared segment.
    """
    # NOTE: There is no corresponding "enable", but the disable only applies
    #       in this lexical scope.
    # pylint: disable=too-many-branches
    direction0 = end0 - start0
    # ``P x D0`` is constant along each line; different constants mean the
    # parallel lines are distinct, hence the segments are disjoint.
    line0_const = _py_helpers.cross_product(start0, direction0)
    start1_against = _py_helpers.cross_product(start1, direction0)
    if line0_const != start1_against:
        return True, None
    # Project the endpoints of the second segment onto the first line.
    # Each array is a 1D vector, so the vector dot product applies:
    # sA (D0^T D0) = D0^T (S1 - S0) and sB (D0^T D0) = D0^T (E1 - S0).
    norm0_sq = np.vdot(direction0, direction0)
    param_start = np.vdot(start1 - start0, direction0) / norm0_sq
    param_end = np.vdot(end1 - start0, direction0) / norm0_sq
    # Along L1: s = param_start + t (param_end - param_start), i.e.
    # t = 0 <==> s = param_start and t = 1 <==> s = param_end.
    if param_start <= param_end:
        # Segments travel in the same direction; intersect the interval
        # [param_start, param_end] with [0, 1].
        if 1.0 < param_start:
            return True, None

        if param_start < 0.0:
            start_s = 0.0
            start_t = -param_start / (param_end - param_start)
        else:
            start_s = param_start
            start_t = 0.0
        if param_end < 0.0:
            return True, None

        if 1.0 < param_end:
            end_s = 1.0
            end_t = (1.0 - param_start) / (param_end - param_start)
        else:
            end_s = param_end
            end_t = 1.0
    else:
        # Segments travel in opposite directions, i.e. in
        # (t=0 <--> s=param_start) and (t=1 <--> s=param_end) we have
        # ``param_start > param_end``.
        if param_start < 0.0:
            return True, None

        if 1.0 < param_start:
            start_s = 1.0
            start_t = (param_start - 1.0) / (param_start - param_end)
        else:
            start_s = param_start
            start_t = 0.0
        if 1.0 < param_end:
            return True, None

        if param_end < 0.0:
            end_s = 0.0
            end_t = param_start / (param_start - param_end)
        else:
            end_s = param_end
            end_t = 1.0
    return False, np.asfortranarray([[start_s, end_s], [start_t, end_t]])
def line_line_collide(line1, line2):
    """Determine if two line segments meet.

    This is a helper for :func:`convex_hull_collide` in the
    special case that the two convex hulls are actually
    just line segments. (Even in this case, this is only
    problematic if both segments are on a single line.)

    Args:
        line1 (numpy.ndarray): ``2 x 2`` array of start and end nodes.
        line2 (numpy.ndarray): ``2 x 2`` array of start and end nodes.

    Returns:
        bool: Indicating if the line segments collide.
    """
    start1, end1 = line1[:, 0], line1[:, 1]
    start2, end2 = line2[:, 0], line2[:, 1]
    s, t, success = segment_intersection(start1, end1, start2, end2)
    if not success:
        # Parallel segments: they collide exactly when they overlap.
        disjoint, _ = parallel_lines_parameters(start1, end1, start2, end2)
        return not disjoint

    # Transversal crossing: collide only if it lies within both segments.
    return _py_helpers.in_interval(s, 0.0, 1.0) and _py_helpers.in_interval(
        t, 0.0, 1.0
    )
def convex_hull_collide(nodes1, nodes2):
    """Determine if the convex hulls of two curves collide.

    .. note::

       This is a helper for :func:`from_linearized`.

    Args:
        nodes1 (numpy.ndarray): Control points of a first curve.
        nodes2 (numpy.ndarray): Control points of a second curve.

    Returns:
        bool: Indicating if the convex hulls collide.
    """
    hull1 = _py_helpers.simple_convex_hull(nodes1)
    hull2 = _py_helpers.simple_convex_hull(nodes2)
    # A two-point "hull" is degenerate (just a line segment) and needs the
    # dedicated segment-segment predicate.
    if hull1.shape[1] == 2 and hull2.shape[1] == 2:
        return line_line_collide(hull1, hull2)

    return _py_helpers.polygon_collide(hull1, hull2)
def from_linearized(first, second, intersections):
    """Determine a curve-curve intersection from a pair of linearizations.

    .. note::

       This assumes that at least one of ``first`` and ``second`` is
       not a line; the line-line case should be handled "early" by
       :func:`check_lines`. It also assumes the caller has verified that
       the bounding boxes for ``first`` and ``second`` actually intersect.

    If there is an intersection along the segments, adds that intersection
    to ``intersections``. Otherwise, returns without doing anything.

    Args:
        first (Linearization): First curve being intersected.
        second (Linearization): Second curve being intersected.
        intersections (list): A list of existing intersections.

    Raises:
        ValueError: If ``first`` and ``second`` both have linearization
            error of ``0.0`` (i.e. they are both lines). This is because
            this function expects the caller to have used
            :func:`check_lines` already.
    """
    s, t, success = segment_intersection(
        first.start_node, first.end_node, second.start_node, second.end_node
    )
    if success:
        bad_parameters = not (
            _py_helpers.in_interval(s, 0.0, 1.0)
            and _py_helpers.in_interval(t, 0.0, 1.0)
        )
    else:
        if first.error == 0.0 and second.error == 0.0:
            raise ValueError(_UNHANDLED_LINES)

        # Parallel segments: fall back to a Newton iteration starting in
        # the middle of the given intervals.
        bad_parameters = True
        s = 0.5
        t = 0.5
    # In the unlikely case of parallel segments or an intersection outside
    # of [0, 1] x [0, 1], we can still exit early if the convex hulls
    # don't intersect.
    if bad_parameters and not convex_hull_collide(
        first.curve.nodes, second.curve.nodes
    ):
        return

    # Promote ``s`` and ``t`` onto the original curves, then refine.
    orig_s = (1 - s) * first.curve.start + s * first.curve.end
    orig_t = (1 - t) * second.curve.start + t * second.curve.end
    refined_s, refined_t = intersection_helpers.full_newton(
        orig_s, first.curve.original_nodes, orig_t, second.curve.original_nodes
    )
    refined_s, success = _py_helpers.wiggle_interval(refined_s)
    if not success:
        return

    refined_t, success = _py_helpers.wiggle_interval(refined_t)
    if not success:
        return

    add_intersection(refined_s, refined_t, intersections)
def add_intersection(s, t, intersections):
    r"""Add an intersection to ``intersections``, accounting for duplicates.

    .. note::

       This is a helper for :func:`from_linearized` and
       :func:`endpoint_check`. These functions are used (directly or
       indirectly) by :func:`all_intersections` exclusively, and that
       function has a Fortran equivalent.

    If the intersection has already been found, does nothing. When ``s``
    is below :attr:`~bezier.hazmat.intersection_helpers.ZERO_THRESHOLD`
    it is replaced with ``1 - s`` when sizing the candidate (similarly
    for ``t``); this is appropriate since evaluating a B |eacute| zier
    curve requires using both ``s`` and ``1 - s``, so both values are
    equally relevant.

    A candidate :math:`p = (s, t)` (or its "normalized" version, such as
    :math:`p = (1 - s, t)`) is considered a duplicate of an already added
    :math:`q` when :math:`\|p - q\|` is below :math:`\|p\|` times
    :attr:`~bezier.hazmat.intersection_helpers.NEWTON_ERROR_RATIO`.

    Args:
        s (float): The first parameter in an intersection.
        t (float): The second parameter in an intersection.
        intersections (list): List of existing intersections.
    """
    if not intersections:
        intersections.append((s, t))
        return

    candidate_s = 1.0 - s if s < intersection_helpers.ZERO_THRESHOLD else s
    candidate_t = 1.0 - t if t < intersection_helpers.ZERO_THRESHOLD else t
    norm_candidate = np.linalg.norm([candidate_s, candidate_t], ord=2)
    for existing_s, existing_t in intersections:
        # NOTE: |(1 - s1) - (1 - s2)| = |s1 - s2| in exact arithmetic, so
        #       ``s - existing_s`` is computed rather than using
        #       ``candidate_s`` / ``candidate_t``. Due to round-off, these
        #       differences may be slightly different, but only up to
        #       machine precision.
        norm_update = np.linalg.norm(
            [s - existing_s, t - existing_t], ord=2
        )
        if (
            norm_update
            < intersection_helpers.NEWTON_ERROR_RATIO * norm_candidate
        ):
            return

    intersections.append((s, t))
def endpoint_check(
    first, node_first, s, second, node_second, t, intersections
):
    r"""Record an intersection when two curve endpoints are identical.

    .. note::

       This is a helper for :func:`tangent_bbox_intersection`. These
       functions are used (directly or indirectly) by
       :func:`all_intersections` exclusively, and that function has a
       Fortran equivalent.

    Args:
        first (SubdividedCurve): First curve being intersected (assumed in
            :math:`\mathbf{R}^2`).
        node_first (numpy.ndarray): 1D ``2``-array, one of the endpoints
            of ``first``.
        s (float): The parameter corresponding to ``node_first``, so
            expected to be one of ``0.0`` or ``1.0``.
        second (SubdividedCurve): Second curve being intersected (assumed in
            :math:`\mathbf{R}^2`).
        node_second (numpy.ndarray): 1D ``2``-array, one of the endpoints
            of ``second``.
        t (float): The parameter corresponding to ``node_second``, so
            expected to be one of ``0.0`` or ``1.0``.
        intersections (list): A list of already encountered
            intersections. If these curves intersect at their tangency,
            then those intersections will be added to this list.
    """
    if not _py_helpers.vector_close(node_first, node_second):
        return

    # Promote the local parameters onto the original (root) intervals.
    orig_s = (1 - s) * first.start + s * first.end
    orig_t = (1 - t) * second.start + t * second.end
    add_intersection(orig_s, orig_t, intersections)
def tangent_bbox_intersection(first, second, intersections):
    r"""Check if two curves with tangent bounding boxes intersect.

    .. note::

       This is a helper for :func:`intersect_one_round`. These
       functions are used (directly or indirectly) by
       :func:`all_intersections` exclusively, and that function has a
       Fortran equivalent.

    If the bounding boxes are tangent, intersection can only occur along
    that tangency. For a non-linear curve, the **only** points that can
    touch the bounding box are the endpoints: since the Bernstein weights
    satisfy :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, any
    component :math:`x(s) = \sum_j W_j x_j` is strictly below
    :math:`M = \max x_j` on the interior unless all :math:`x_j = M`, in
    which case the curve lies on the line :math:`x = M`. (A similar
    argument holds for the other three component-extrema types.)

    .. note::

       This function assumes callers will not pass curves that can be
       linearized / are linear. In :func:`all_intersections`, curves
       are pre-processed to do any linearization before the
       subdivision / intersection process begins.

    Args:
        first (SubdividedCurve): First curve being intersected (assumed in
            :math:`\mathbf{R}^2`).
        second (SubdividedCurve): Second curve being intersected (assumed in
            :math:`\mathbf{R}^2`).
        intersections (list): A list of already encountered
            intersections. If these curves intersect at their tangency,
            then those intersections will be added to this list.
    """
    # Only the four endpoint pairings can produce an intersection (see the
    # argument above), so check each combination in turn.
    endpoints_first = ((first.nodes[:, 0], 0.0), (first.nodes[:, -1], 1.0))
    endpoints_second = ((second.nodes[:, 0], 0.0), (second.nodes[:, -1], 1.0))
    for (node1, s), (node2, t) in itertools.product(
        endpoints_first, endpoints_second
    ):
        endpoint_check(first, node1, s, second, node2, t, intersections)
def bbox_line_intersect(nodes, line_start, line_end):
    r"""Determine the intersection of a bounding box and a line segment.

    First checks if either endpoint of the segment is contained in the
    bounding box; if not, checks the segment against three of the four
    sides of the box (see comment below on why three suffice).

    .. note::

       This function is "half-finished". It makes no distinction between
       "tangent" intersections of the box and segment and other types
       of intersection. However, the distinction is worthwhile, so this
       function should be "upgraded" at some point.

    Args:
        nodes (numpy.ndarray): Points (``2 x N``) that determine a
            bounding box.
        line_start (numpy.ndarray): Beginning of a line segment (1D
            ``2``-array).
        line_end (numpy.ndarray): End of a line segment (1D ``2``-array).

    Returns:
        int: Enum from :class:`.BoxIntersectionType` indicating the type of
        bounding box intersection.
    """
    left, right, bottom, top = _py_helpers.bbox(nodes)

    def contains(point):
        # True if ``point`` lies inside (or on) the bounding box.
        return _py_helpers.in_interval(
            point[0], left, right
        ) and _py_helpers.in_interval(point[1], bottom, top)

    if contains(line_start) or contains(line_end):
        return BoxIntersectionType.INTERSECTION

    # NOTE: ``segment_intersection`` is allowed to fail below (i.e.
    #       ``success=False``) without missing any intersections of
    #       parallel lines. If parallel lines don't overlap, there is
    #       nothing to miss. If they do overlap, then either the segment
    #       has an endpoint in the box (already covered above) or the
    #       segment contains an entire side of the box, forcing it to
    #       intersect the 3 edges that meet at the two ends of that side.
    #       The parallel edge is skipped, but the other two are covered.
    # Bottom, right and top edges. The "last" (left) edge is skipped: a
    # segment with no endpoint on the box must cross at least two edges,
    # so one of these three branches already reports it.
    edges = (
        ((left, bottom), (right, bottom)),
        ((right, bottom), (right, top)),
        ((right, top), (left, top)),
    )
    for edge_start, edge_end in edges:
        s_param, t_param, success = segment_intersection(
            np.asfortranarray(edge_start),
            np.asfortranarray(edge_end),
            line_start,
            line_end,
        )
        if (
            success
            and _py_helpers.in_interval(s_param, 0.0, 1.0)
            and _py_helpers.in_interval(t_param, 0.0, 1.0)
        ):
            return BoxIntersectionType.INTERSECTION

    return BoxIntersectionType.DISJOINT
def intersect_one_round(candidates, intersections):
    """Perform one step of the intersection process.

    .. note::

       This is a helper for :func:`all_intersections` and that function
       has a Fortran equivalent.

    Checks if the bounding boxes of each pair in ``candidates``
    intersect. Pairs with disjoint boxes are discarded; tangent boxes are
    resolved through their shared endpoints; pairs where both members are
    already linearized are intersected immediately. Every remaining
    ("accepted") pair is subdivided and linearized for the next round.

    Args:
        candidates (Union[list, itertools.chain]): An iterable of
            pairs of curves (or linearized curves).
        intersections (list): A list of already encountered
            intersections. If any intersections can be readily determined
            during this round of subdivision, then they will be added
            to this list.

    Returns:
        list: Returns a list of the next round of ``candidates``.
    """
    accepted = []
    # NOTE: ``a.__class__ is B`` is used instead of ``isinstance(a, B)``
    #       because it is a 3-3.5x speedup.
    for first, second in candidates:
        linearized1 = first.__class__ is Linearization
        linearized2 = second.__class__ is Linearization
        both_linearized = linearized1 and linearized2
        if both_linearized:
            bbox_int = bbox_intersect(first.curve.nodes, second.curve.nodes)
        elif linearized1:
            bbox_int = bbox_line_intersect(
                second.nodes, first.start_node, first.end_node
            )
        elif linearized2:
            bbox_int = bbox_line_intersect(
                first.nodes, second.start_node, second.end_node
            )
        else:
            bbox_int = bbox_intersect(first.nodes, second.nodes)
        if bbox_int == BoxIntersectionType.DISJOINT:
            continue

        if bbox_int == BoxIntersectionType.TANGENT and not both_linearized:
            # NOTE: Tangent bounding boxes are ignored in the linearized
            #       case because ``tangent_bbox_intersection()`` assumes
            #       that both curves are not linear.
            tangent_bbox_intersection(first, second, intersections)
            continue

        if both_linearized:
            # Two linearizations can be intersected immediately.
            from_linearized(first, second, intersections)
            continue

        # Not ``continue``-d: subdivide the accepted pair.
        # NOTE: This may be a wasted computation, e.g. if ``first`` or
        #       ``second`` occur in multiple accepted pairs (the caller
        #       only passes one pair at a time). However, in practice the
        #       number of such pairs will be small so this cost is low.
        pieces1 = map(Linearization.from_shape, first.subdivide())
        pieces2 = map(Linearization.from_shape, second.subdivide())
        accepted.extend(itertools.product(pieces1, pieces2))
    return accepted
def prune_candidates(candidates):
    """Reduce number of candidate intersection pairs.

    .. note::

       This is a helper for :func:`all_intersections`.

    Uses a stricter predicate than bounding-box overlap: the actual convex
    hull of each candidate curve segment is formed, and a pair is kept
    only if those convex hulls collide.

    Args:
        candidates (List[Union[SubdividedCurve, Linearization]]): An iterable
            of pairs of curves (or linearized curves).

    Returns:
        List[Union[SubdividedCurve, Linearization]]: A pruned list of curve
        pairs.
    """

    def control_points(shape):
        # NOTE: ``a.__class__ is B`` is used instead of ``isinstance(a, B)``
        #       because it is a 3-3.5x speedup.
        if shape.__class__ is Linearization:
            return shape.curve.nodes

        return shape.nodes

    return [
        (first, second)
        for first, second in candidates
        if convex_hull_collide(control_points(first), control_points(second))
    ]
def make_same_degree(nodes1, nodes2):
    """Degree-elevate a curve so two curves have matching degree.

    The lower-degree curve is repeatedly degree-elevated until both
    control point sets have the same number of nodes; the other curve is
    returned unchanged.

    Args:
        nodes1 (numpy.ndarray): Set of control points for a
            B |eacute| zier curve.
        nodes2 (numpy.ndarray): Set of control points for a
            B |eacute| zier curve.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray]: The potentially degree-elevated
        nodes passed in.
    """
    # Each elevation adds exactly one node, so at most one loop runs.
    target = max(nodes1.shape[1], nodes2.shape[1])
    while nodes1.shape[1] < target:
        nodes1 = curve_helpers.elevate_nodes(nodes1)
    while nodes2.shape[1] < target:
        nodes2 = curve_helpers.elevate_nodes(nodes2)
    return nodes1, nodes2
def coincident_parameters(nodes1, nodes2):
    r"""Check if two B |eacute| zier curves are coincident.
    Does so by projecting each segment endpoint onto the other curve
    .. math::
        B_1(s_0) = B_2(0) \\
        B_1(s_m) = B_2(1) \\
        B_1(0) = B_2(t_0) \\
        B_1(1) = B_2(t_n)
    and then finding the "shared interval" where both curves are defined.
    If such an interval can't be found (e.g. if one of the endpoints can't be
    located on the other curve), returns :data:`None`.
    If such a "shared interval" does exist, then this will specialize
    each curve onto that shared interval and check if the new control points
    agree.
    Args:
        nodes1 (numpy.ndarray): Set of control points for a
            B |eacute| zier curve.
        nodes2 (numpy.ndarray): Set of control points for a
            B |eacute| zier curve.
    Returns:
        Optional[Tuple[Tuple[float, float], ...]]: A ``2 x 2`` array of
        parameters where the two coincident curves meet. If they are not
        coincident, returns :data:`None`.
    """
    # NOTE: There is no corresponding "enable", but the disable only applies
    # in this lexical scope.
    # pylint: disable=too-many-return-statements,too-many-branches
    nodes1, nodes2 = make_same_degree(nodes1, nodes2)
    # Locate the endpoints of curve 2 on curve 1 (``None`` if not on it).
    s_initial = curve_helpers.locate_point(
        nodes1, nodes2[:, 0].reshape((2, 1), order="F")
    )
    s_final = curve_helpers.locate_point(
        nodes1, nodes2[:, -1].reshape((2, 1), order="F")
    )
    if s_initial is not None and s_final is not None:
        # In this case, if the curves were coincident, then ``curve2``
        # would be "fully" contained in ``curve1``, so we specialize
        # ``curve1`` down to that interval to check.
        specialized1 = curve_helpers.specialize_curve(
            nodes1, s_initial, s_final
        )
        if _py_helpers.vector_close(
            specialized1.ravel(order="F"), nodes2.ravel(order="F")
        ):
            return ((s_initial, 0.0), (s_final, 1.0))
        else:
            return None
    # Locate the endpoints of curve 1 on curve 2 (``None`` if not on it).
    t_initial = curve_helpers.locate_point(
        nodes2, nodes1[:, 0].reshape((2, 1), order="F")
    )
    t_final = curve_helpers.locate_point(
        nodes2, nodes1[:, -1].reshape((2, 1), order="F")
    )
    if t_initial is None and t_final is None:
        # An overlap must have two endpoints and since at most one of the
        # endpoints of ``curve2`` lies on ``curve1`` (as indicated by at
        # least one of the ``s``-parameters being ``None``), we need (at least)
        # one endpoint of ``curve1`` on ``curve2``.
        return None
    if t_initial is not None and t_final is not None:
        # In this case, if the curves were coincident, then ``curve1``
        # would be "fully" contained in ``curve2``, so we specialize
        # ``curve2`` down to that interval to check.
        specialized2 = curve_helpers.specialize_curve(
            nodes2, t_initial, t_final
        )
        if _py_helpers.vector_close(
            nodes1.ravel(order="F"), specialized2.ravel(order="F")
        ):
            return ((0.0, t_initial), (1.0, t_final))
        else:
            return None
    if s_initial is None and s_final is None:
        # An overlap must have two endpoints and since exactly one of the
        # endpoints of ``curve1`` lies on ``curve2`` (as indicated by exactly
        # one of the ``t``-parameters being ``None``), we need (at least)
        # one endpoint of ``curve1`` on ``curve2``.
        return None
    # At this point, we know exactly one of the ``s``-parameters and exactly
    # one of the ``t``-parameters is not ``None``. Map the four cases onto
    # a candidate shared interval expressed in both parameterizations.
    if s_initial is None:
        if t_initial is None:
            # B1(s_final) = B2(1) AND B1(1) = B2(t_final)
            start_s = s_final
            end_s = 1.0
            start_t = 1.0
            end_t = t_final
        else:
            # B1(0) = B2(t_initial) AND B1(s_final) = B2(1)
            start_s = 0.0
            end_s = s_final
            start_t = t_initial
            end_t = 1.0
    else:
        if t_initial is None:
            # B1(s_initial) = B2(0) AND B1(1) = B2(t_final)
            start_s = s_initial
            end_s = 1.0
            start_t = 0.0
            end_t = t_final
        else:
            # B1(0) = B2(t_initial) AND B1(s_initial) = B2(0)
            start_s = 0.0
            end_s = s_initial
            start_t = t_initial
            end_t = 0.0
    # Reject a "shared interval" that has (numerically) collapsed to a point.
    width_s = abs(start_s - end_s)
    width_t = abs(start_t - end_t)
    if width_s < _MIN_INTERVAL_WIDTH and width_t < _MIN_INTERVAL_WIDTH:
        return None
    # Specialize both curves onto the candidate interval and compare.
    specialized1 = curve_helpers.specialize_curve(nodes1, start_s, end_s)
    specialized2 = curve_helpers.specialize_curve(nodes2, start_t, end_t)
    if _py_helpers.vector_close(
        specialized1.ravel(order="F"), specialized2.ravel(order="F")
    ):
        return ((start_s, start_t), (end_s, end_t))
    else:
        return None
def check_lines(first, second):
    """Checks if two curves are lines and tries to intersect them.

    .. note::

       This is a helper for :func:`.all_intersections`.

    If they are not lines / not linearized, immediately returns
    :data:`False` with no "return value". If they are lines, attempts to
    intersect them (even if they are parallel and share a coincident
    segment).

    Args:
        first (Union[SubdividedCurve, Linearization]): First curve being
            intersected.
        second (Union[SubdividedCurve, Linearization]): Second curve being
            intersected.

    Returns:
        Tuple[bool, Optional[Tuple[numpy.ndarray, bool]]]: A pair of

        * Flag indicating if both candidates in the pair are lines.
        * Optional "result" populated only if both candidates are lines.
          When this result is populated, it will be a pair of

          * array of parameters of intersection
          * flag indicating if the two candidates share a coincident
            segment
    """
    # NOTE: ``a.__class__ is B`` is used instead of ``isinstance(a, B)``
    #       because it is a 3-3.5x speedup.
    is_line_pair = (
        first.__class__ is Linearization
        and second.__class__ is Linearization
        and first.error == 0.0
        and second.error == 0.0
    )
    if not is_line_pair:
        return False, None

    s, t, success = segment_intersection(
        first.start_node, first.end_node, second.start_node, second.end_node
    )
    if success:
        if _py_helpers.in_interval(s, 0.0, 1.0) and _py_helpers.in_interval(
            t, 0.0, 1.0
        ):
            # A single transversal intersection within both segments.
            return True, (np.asfortranarray([[s], [t]]), False)

        # The infinite lines cross, but outside of [0, 1] x [0, 1].
        return True, (np.empty((2, 0), order="F"), False)

    disjoint, parameters = parallel_lines_parameters(
        first.start_node,
        first.end_node,
        second.start_node,
        second.end_node,
    )
    if disjoint:
        return True, (np.empty((2, 0), order="F"), False)

    # Parallel segments sharing a coincident sub-segment.
    return True, (parameters, True)
def all_intersections(nodes_first, nodes_second):
r"""Find the points of intersection among a pair of curves.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
.. note::
This assumes both curves are in :math:`\mathbf{R}^2`, but does not
**explicitly** check this. However, functions used here will fail if
that assumption fails, e.g. :func:`bbox_intersect` and
:func:`newton_refine() <.hazmat.intersection_helpers.newton_refine>`.
Args:
nodes_first (numpy.ndarray): Control points of a curve to be
intersected with ``nodes_second``.
nodes_second (numpy.ndarray): Control points of a curve to be
intersected with ``nodes_first``.
Returns:
Tuple[numpy.ndarray, bool]: An array and a flag:
* A ``2 x N`` array of intersection parameters.
Each row contains a pair of values :math:`s` and :math:`t`
(each in :math:`\left[0, 1\right]`) such that the curves
intersect: :math:`B_1(s) = B_2(t)`.
* Flag indicating if the curves are coincident.
Raises:
ValueError: If the subdivision iteration does not terminate
before exhausting the maximum number of subdivisions.
NotImplementedError: If the subdivision process picks up too
many candidate pairs. This typically indicates tangent
curves or coincident curves (though there are mitigations for
those cases in place).
"""
curve_first = SubdividedCurve(nodes_first, nodes_first)
curve_second = SubdividedCurve(nodes_second, nodes_second)
candidate1 = Linearization.from_shape(curve_first)
candidate2 = Linearization.from_shape(curve_second)
# Handle the line-line intersection case as a one-off.
both_linear, result = check_lines(candidate1, candidate2)
if both_linear:
return result
candidates = [(candidate1, candidate2)]
intersections = []
coincident = False
for _ in range(_MAX_INTERSECT_SUBDIVISIONS):
candidates = intersect_one_round(candidates, intersections)
if len(candidates) > _MAX_CANDIDATES:
candidates = prune_candidates(candidates)
# If pruning didn't fix anything, we check if the curves are
# coincident and "fail" if they aren't.
if len(candidates) > _MAX_CANDIDATES:
params = coincident_parameters(nodes_first, nodes_second)
if params is None:
raise NotImplementedError(
_TOO_MANY_TEMPLATE.format(len(candidates))
)
intersections = params
coincident = True
# Artificially empty out candidates so that this
# function exits.
candidates = []
# If none of the candidate pairs have been accepted, then there are
# no more intersections to find.
if not candidates:
if intersections:
# NOTE: The transpose of a C-ordered array is Fortran-ordered,
# i.e. this is on purpose.
return np.array(intersections, order="C").T, coincident
return | np.empty((2, 0), order="F") | numpy.empty |
import unittest
import numpy as np
from ddt import ddt, data
from ...methods.convolution.convolve import convolve
from ...methods.transforms.poisson_noise import poisson_noise
@ddt
class TestConvolve(unittest.TestCase):
@data(
[np.ones([10, 10, 10]), np.ones([3, 3, 4])],
[np.ones([10, 10]), np.ones([3, 4])],
[np.ones([10]), np.ones([3])],
)
def test_dimensions(self, case):
img, psf = case
self.assertEqual(len(convolve(img, psf).shape), len(img.shape))
@data(
[ | np.ones([10, 10, 10]) | numpy.ones |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © %YEAR% <>
#
# Distributed under terms of the %LICENSE% license.
"""
This module is used to train the liver model with intensity features.
First use the organ_localizator module to train an intensity-independent model.
"""
from loguru import logger
# logger = logging.getLogger()
import os
import sys
import os.path as op
sys.path.append(op.join(op.dirname(os.path.abspath(__file__)), "../../imcut/"))
import argparse
import glob
import traceback
import numpy as np
import imma
import io3d
def add_fv_extern_into_modelparams(modelparams):
    """
    Substitute a string feature-vector description by the actual function.

    If ``modelparams["fv_type"]`` is ``"fv_extern"`` and the value under the
    ``"fv_extern"`` key is a string naming one of the feature functions of
    this module, the string is replaced in place by the function object.
    Unknown names are logged as an error and left untouched.

    :param modelparams: model parameters dict; mutated in place
    :return: the (possibly modified) ``modelparams`` dict
    """
    if modelparams.get('fv_type') == 'fv_extern':
        # Only substitute when the description is still a string (a callable
        # may already have been inserted by the caller).
        if isinstance(modelparams['fv_extern'], str):
            fv_extern_str = modelparams['fv_extern']
            if fv_extern_str == "intensity_localization_fv":
                modelparams['fv_extern'] = intensity_localization_fv
            elif fv_extern_str == "localization_fv":
                modelparams['fv_extern'] = localization_fv
            elif fv_extern_str == "intensity_localization_2steps_fv":
                modelparams['fv_extern'] = intensity_localization_2steps_fv
            elif fv_extern_str == "near_blur_intensity_localization_fv":
                modelparams['fv_extern'] = near_blur_intensity_localization_fv
                print("blur intensity")
            elif fv_extern_str == "with_ribs_fv":
                modelparams['fv_extern'] = with_ribs_fv
                logger.debug('with_ribs_fv used')
            else:
                logger.error("problem in modelparam fv_extern description")
    return modelparams
def with_ribs_fv(data3dr, voxelsize_mm, seeds=None, unique_cls=None):  # scale
    """
    Feature vector using intensity and the body_navigation module with ribs.

    NOTE(review): this is an empty placeholder -- it always returns ``None``.
    Callers selecting ``"with_ribs_fv"`` in modelparams will get no features.

    Implemented by <NAME>

    :param data3dr: 3D image data (resampled)
    :param voxelsize_mm: voxel size in millimetres
    :param seeds: optional seed labels, same shape as ``data3dr``
    :param unique_cls: optional list of seed labels to keep
    :return: None (not implemented)
    """
    pass
def near_blur_intensity_localization_fv(data3dr, voxelsize_mm, seeds=None, unique_cls=None):  # scale
    """
    Feature vector combining organ_localizator features with intensity features.

    Uses a fine and a coarse isotropic Gaussian blur of the intensity, three
    strongly anisotropic blurs (directional context), the organ-localizator
    features and the median intensity of the confidently-predicted organ core.

    :param data3dr: 3D image data (resampled to the working resolution)
    :param voxelsize_mm: voxel size in millimetres
    :param seeds: optional seed labels; if given, the feature vectors are
        restricted to the seeded voxels
    :param unique_cls: seed labels to keep when ``seeds`` is given
    :return: feature-vector array, or the tuple ``(feature vector, seeds)``
        when ``seeds`` is not None
    """
    import scipy.ndimage  # ensure the ndimage submodule is actually loaded
    import os.path as op
    try:
        from lisa import organ_localizator
    except ImportError:
        import organ_localizator

    # Intensity features: fine and coarse isotropic Gaussian blurs.
    f0 = scipy.ndimage.gaussian_filter(data3dr, sigma=0.5).reshape(-1, 1)
    f1 = scipy.ndimage.gaussian_filter(data3dr, sigma=3).reshape(-1, 1)
    # Directional context: strongly anisotropic blurs relative to the coarse blur.
    f6 = scipy.ndimage.gaussian_filter(data3dr, sigma=[20, 1, 1]).reshape(-1, 1) - f1
    f7 = scipy.ndimage.gaussian_filter(data3dr, sigma=[1, 20, 1]).reshape(-1, 1) - f1
    f8 = scipy.ndimage.gaussian_filter(data3dr, sigma=[1, 1, 20]).reshape(-1, 1) - f1
    # Pre-trained intensity-independent localization model.
    ol = organ_localizator.OrganLocalizator()
    ol.load(op.expanduser("~/lisa_data/liver.ol.p"))
    fdall = ol.feature_function(data3dr, voxelsize_mm)
    # Median intensity of the confidently-predicted organ core as a constant feature.
    middle_liver = ol.predict_w(data3dr, voxelsize_mm, 0.85)
    mn = np.median(data3dr[middle_liver == 1])
    fdn = np.ones(f0.shape) * mn
    # Concatenation order preserved from the original implementation; the
    # shared helper also applies the seeds selection.
    return combine_fv_and_seeds([f0, f1, fdall, f6, f7, f8, fdn], seeds, unique_cls)
def localization_fv(data3dr, voxelsize_mm, seeds=None, unique_cls=None):  # scale
    """
    Intensity-independent localization feature vector.

    Thin wrapper around ``organ_localizator.localization_fv`` that also
    applies the seed selection via :func:`combine_fv_and_seeds`.

    :param data3dr: 3D image data (resampled)
    :param voxelsize_mm: voxel size in millimetres
    :param seeds: optional seed labels
    :param unique_cls: seed labels to keep when ``seeds`` is given
    :return: feature-vector array, or ``(feature vector, seeds)`` when
        ``seeds`` is not None
    """
    # BUGFIX: the original unconditionally re-imported ``organ_localizator``
    # after this try/except, which defeated the ``lisa`` package fallback.
    try:
        from lisa import organ_localizator
    except ImportError:
        import organ_localizator
    fvall = organ_localizator.localization_fv(data3dr, voxelsize_mm)
    return combine_fv_and_seeds([fvall], seeds, unique_cls)
def combine_fv_and_seeds(feature_vectors, seeds=None, unique_cls=None):
    """
    Combine a list of feature vectors and optionally restrict them to seeds.

    The feature vectors are concatenated along the feature axis (axis 1).
    If ``seeds`` is given, only the rows whose seed label is contained in
    ``unique_cls`` are kept.

    :param feature_vectors: list of 2D arrays with one row per voxel
    :param seeds: optional seed labels (any shape; flattened internally)
    :param unique_cls: seed labels to keep when ``seeds`` is given
    :return: concatenated feature-vector array, or the tuple
        ``(feature vector, selected seeds)`` when ``seeds`` is not None;
        ``None`` if ``feature_vectors`` is not a list
    """
    if not isinstance(feature_vectors, list):
        logger.error("Wrong type: feature_vectors should be list")
        return None
    fv = np.concatenate(feature_vectors, 1)
    if seeds is not None:
        sd = seeds.reshape(-1, 1)
        # Keep only the voxels whose seed label belongs to the requested classes.
        selection = np.in1d(sd, unique_cls)
        return fv[selection], sd[selection]
    return fv
def intensity_localization_fv(data3dr, voxelsize_mm, seeds=None, unique_cls=None):  # scale
    """
    Feature vector combining localization features with intensity features.

    :param data3dr: 3D image data (resampled to the working resolution)
    :param voxelsize_mm: voxel size in millimetres
    :param seeds: optional seed labels; if given, the feature vectors are
        restricted to the seeded voxels
    :param unique_cls: seed labels to keep when ``seeds`` is given
    :return: feature-vector array, or the tuple ``(feature vector, seeds)``
        when ``seeds`` is not None
    """
    import scipy.ndimage  # ensure the ndimage submodule is actually loaded
    # BUGFIX: the original unconditionally re-imported ``organ_localizator``
    # after this try/except, which defeated the ``lisa`` package fallback.
    try:
        from lisa import organ_localizator
    except ImportError:
        import organ_localizator
    # Intensity features: fine and coarse isotropic Gaussian blurs.
    f0 = scipy.ndimage.gaussian_filter(data3dr, sigma=0.5).reshape(-1, 1)
    f1 = scipy.ndimage.gaussian_filter(data3dr, sigma=3).reshape(-1, 1)
    # Intensity-independent localization features.
    fvall = organ_localizator.localization_fv(data3dr, voxelsize_mm)
    # Concatenation order preserved from the original implementation; the
    # shared helper also applies the seeds selection.
    return combine_fv_and_seeds([f0, f1, fvall], seeds, unique_cls)
def intensity_localization_2steps_fv(data3dr, voxelsize_mm, seeds=None, unique_cls=None):  # scale
    """
    Two-step feature vector: intensity blurs, organ-localizator features and
    the median intensity of the confidently-predicted organ core.

    :param data3dr: 3D image data (resampled to the working resolution)
    :param voxelsize_mm: voxel size in millimetres
    :param seeds: optional seed labels; if given, the feature vectors are
        restricted to the seeded voxels
    :param unique_cls: seed labels to keep when ``seeds`` is given
    :return: feature-vector array, or the tuple ``(feature vector, seeds)``
        when ``seeds`` is not None
    """
    import scipy.ndimage  # ensure the ndimage submodule is actually loaded
    import os.path as op
    try:
        from lisa import organ_localizator
    except ImportError:
        import organ_localizator
    # Intensity features: fine and coarse isotropic Gaussian blurs.
    f0 = scipy.ndimage.gaussian_filter(data3dr, sigma=0.5).reshape(-1, 1)
    f1 = scipy.ndimage.gaussian_filter(data3dr, sigma=3).reshape(-1, 1)
    # First step: pre-trained intensity-independent localization model.
    ol = organ_localizator.OrganLocalizator()
    ol.load(op.expanduser("~/lisa_data/liver.ol.p"))
    fdall = ol.feature_function(data3dr, voxelsize_mm)
    # Second step: median intensity of the predicted organ core as a constant feature.
    middle_liver = ol.predict_w(data3dr, voxelsize_mm, 0.85)
    mn = np.median(data3dr[middle_liver == 1])
    fdn = np.ones(f0.shape) * mn
    # Concatenation order preserved from the original implementation; the
    # shared helper also applies the seeds selection.
    return combine_fv_and_seeds([f0, f1, fdall, fdn], seeds, unique_cls)
class ModelTrainer():
    """Trainer for an intensity-based organ (liver) segmentation model.

    Accumulates subsampled feature vectors and target labels from training
    volumes, fits a ``pycut.Model`` and can then predict/score new volumes.
    """

    def __init__(self, feature_function=None, modelparams=None):
        """
        :param feature_function: callable feature extractor; defaults to
            :func:`intensity_localization_fv`
        :param modelparams: extra model parameters overriding the defaults
            (default ``None`` replaces the former mutable ``{}`` default)
        """
        from imcut import pycut
        # All volumes are resampled to this isotropic resolution before use.
        self.working_voxelsize_mm = [1.5, 1.5, 1.5]
        self.data = None    # accumulated feature vectors (rows)
        self.target = None  # accumulated labels, aligned with self.data
        if feature_function is None:
            feature_function = intensity_localization_fv
        if modelparams is None:
            modelparams = {}
        modelparams_working = {
            'fv_type': "fv_extern",
            'fv_extern': feature_function,
            'type': 'gmmsame',
            'params': {'cvtype': 'full', 'n_components': 15},
            'adaptation': 'original_data',
        }
        modelparams = add_fv_extern_into_modelparams(modelparams)
        modelparams_working.update(modelparams)
        self.cl = pycut.Model(modelparams=modelparams_working)

    def _fv(self, data3dr, voxelsize_mm):
        """Return the feature vectors for a resampled volume."""
        return self.cl.features_from_image(data3dr, voxelsize_mm)

    def _add_to_training_data(self, data3dr, voxelsize_mm, segmentationr):
        """Subsample features/labels (every 50th voxel) and accumulate them."""
        fv = self._fv(data3dr, voxelsize_mm)
        data = fv[::50]
        target = np.reshape(segmentationr, [-1, 1])[::50]
        if self.data is None:
            self.data = data
            self.target = target
        else:
            self.data = np.concatenate([self.data, data], 0)
            self.target = np.concatenate([self.target, target], 0)

    def fit(self):
        """Fit the classifier on all accumulated training data."""
        self.cl.fit(self.data, self.target)

    def predict(self, data3d, voxelsize_mm):
        """Predict the segmentation of ``data3d``, returned in the input shape."""
        data3dr = imma.image.resize_to_mm(data3d, voxelsize_mm, self.working_voxelsize_mm)
        # BUGFIX: the working voxel size was previously not passed to _fv(),
        # which raised a TypeError at call time.
        fv = self._fv(data3dr, self.working_voxelsize_mm)
        pred = self.cl.predict(fv)
        return imma.image.resize_to_shape(pred.reshape(data3dr.shape), data3d.shape)

    def scores(self, data3d, voxelsize_mm):
        """Return per-class score volumes resized back to the input shape."""
        data3dr = imma.image.resize_to_mm(data3d, voxelsize_mm, self.working_voxelsize_mm)
        # BUGFIX: the working voxel size was previously not passed to _fv().
        fv = self._fv(data3dr, self.working_voxelsize_mm)
        scoreslin = self.cl.scores(fv)
        scores = {}
        for key in scoreslin:
            scores[key] = imma.image.resize_to_shape(scoreslin[key].reshape(data3dr.shape), data3d.shape)
        return scores

    def __preprocessing(self, data3d):
        """Placeholder for future preprocessing (not implemented)."""
        pass

    def add_train_data(self, data3d, segmentation, voxelsize_mm):
        """Resample one volume and its segmentation and accumulate them."""
        data3dr = imma.image.resize_to_mm(data3d, voxelsize_mm, self.working_voxelsize_mm)
        segmentationr = imma.image.resize_to_shape(segmentation, data3dr.shape)
        logger.debug(str(np.unique(segmentationr)))
        logger.debug(str(data3dr.shape) + str(segmentationr.shape))
        self._add_to_training_data(data3dr, self.working_voxelsize_mm, segmentationr)

    def train_liver_model_from_sliver_data(self, *args, **kwargs):
        """Backward-compatible alias. See :meth:`train_organ_model_from_dir`."""
        return self.train_organ_model_from_dir(*args, **kwargs)

    def train_organ_model_from_dir(
            self,
            output_file="~/lisa_data/liver_intensity.Model.p",
            reference_dir='~/data/medical/orig/sliver07/training/',
            orig_pattern="*orig*[1-9].mhd",
            ref_pattern="*seg*[1-9].mhd",
            label=1,
            segmentation_key=False
    ):
        """
        Train the model on pairs of original/reference volumes in a directory.

        :param output_file: path where the trained model is stored
        :param reference_dir: directory with the training data
        :param orig_pattern: glob pattern of the original (intensity) volumes
        :param ref_pattern: glob pattern of the reference segmentations
        :param label: segmentation label; if a string is given, it is looked
            up in the "slab" dict of the metadata (works for .pklz)
        :param segmentation_key: if True, load the segmentation from the
            "segmentation" key of the metadata instead of the data array
        """
        logger.debug("label: {}".format(str(label)))
        reference_dir = op.expanduser(reference_dir)
        orig_fnames = glob.glob(reference_dir + orig_pattern)
        ref_fnames = glob.glob(reference_dir + ref_pattern)
        orig_fnames.sort()
        ref_fnames.sort()
        if len(orig_fnames) == 0:
            logger.warning("No file found in path:\n{}".format(reference_dir + orig_pattern))
        print(ref_fnames)
        for oname, rname in zip(orig_fnames, ref_fnames):
            logger.debug(oname)
            data3d_orig, metadata = io3d.datareader.read(oname, dataplus_format=False)
            # NOTE(review): vs_mm1 (orig-file voxel size) is unused; training
            # uses the segmentation-file voxel size -- confirm they match.
            vs_mm1 = metadata['voxelsize_mm']
            data3d_seg, metadata = io3d.datareader.read(rname, dataplus_format=False)
            vs_mm = metadata['voxelsize_mm']
            # BUGFIX: the original test was ``is not None``, which was always
            # True for the default value ``False``.
            if segmentation_key:
                data3d_seg = metadata['segmentation']
            if isinstance(label, str):
                try:
                    label = metadata["slab"][label]
                except Exception:
                    logger.error(traceback.format_exc())
                    logger.error("Problem with label\nRequested label: {}\n".format(str(label)))
                    if "slab" in metadata.keys():
                        logger.error("slab:")
                        logger.error(str(metadata['slab']))
                    logger.error("unique numeric labels in segmentation:\n{}".format(str(np.unique(data3d_seg))))
                    raise
            # Organ voxels get label 1, everything else 0.
            data3d_seg = (data3d_seg == label).astype(np.int8)
            try:
                self.add_train_data(data3d_orig, data3d_seg, voxelsize_mm=vs_mm)
            except Exception:
                # Best-effort: skip broken volumes but keep training on the rest.
                traceback.print_exc()
                print("problem - liver model")
        self.fit()
        output_file = op.expanduser(output_file)
        print("Saved into: ", output_file)
        self.cl.save(output_file)
def train_liver_model_from_sliver_data(*args, **kwargs):
    """Backward-compatible alias for :func:`train_organ_model_from_dir`."""
    return train_organ_model_from_dir(*args, **kwargs)
def train_organ_model_from_dir(
        output_file="~/lisa_data/liver_intensity.Model.p",
        reference_dir='~/data/medical/orig/sliver07/training/',
        orig_pattern="*orig*[1-9].mhd",
        ref_pattern="*seg*[1-9].mhd",
        label=1,
        segmentation_key=False,
        modelparams=None
):
    """
    Convenience wrapper: create a :class:`ModelTrainer`, train it on a
    directory of volumes and save the resulting model.

    See :meth:`ModelTrainer.train_organ_model_from_dir` for the parameters.

    :return: tuple ``(data, target)`` with the accumulated feature vectors
        and labels
    """
    # BUGFIX: ``modelparams`` previously defaulted to a shared mutable ``{}``;
    # it is mutated downstream (add_fv_extern_into_modelparams works in place),
    # so the default leaked state between calls.
    sf = ModelTrainer(modelparams={} if modelparams is None else modelparams)
    sf.train_organ_model_from_dir(
        output_file=output_file,
        reference_dir=reference_dir,
        orig_pattern=orig_pattern,
        ref_pattern=ref_pattern,
        label=label,
        segmentation_key=segmentation_key
    )
    return sf.data, sf.target
def model_score_from_sliver_data(
# output_file="~/lisa_data/liver_intensity.Model.p",
sliver_reference_dir='~/data/medical/orig/sliver07/training/',
orig_pattern="*orig*[1-9].mhd",
ref_pattern="*seg*[1-9].mhd",
modelparams={},
likelihood_ratio=0.5,
savefig=False,
savefig_fn_prefix='../graphics/bn-symmetry-',
show=False,
label='',
):
"""
:param label: text label added to all records in output table
:param sliver_reference_dir:
:param orig_pattern:
:param ref_pattern:
:param modelparams:
:param likelihood_ratio: float number between 0 and 1, scalar or list. Sets the segmentation threshold
:param savefig:
:param savefig_fn_prefix:
:param show: show images
:return:
"""
import pandas as pd
from imcut import pycut
import sed3
import matplotlib.pyplot as plt
import volumetry_evaluation
sliver_reference_dir = op.expanduser(sliver_reference_dir)
orig_fnames = glob.glob(sliver_reference_dir + orig_pattern)
ref_fnames = glob.glob(sliver_reference_dir + ref_pattern)
orig_fnames.sort()
ref_fnames.sort()
evaluation_all = []
for oname, rname in zip(orig_fnames, ref_fnames):
print(oname)
data3d_orig, metadata = io3d.datareader.read(oname, dataplus_format=False)
vs_mm1 = metadata['voxelsize_mm']
data3d_seg, metadata = io3d.datareader.read(rname, dataplus_format=False)
vs_mm = metadata['voxelsize_mm']
mdl = pycut.Model(modelparams=modelparams)
# m0 = mdl.mdl[2]
# len(m0.means_)
vs_mmr = [1.5, 1.5, 1.5]
data3dr = imma.image.resize_to_mm(data3d_orig, vs_mm1, vs_mmr)
lik1 = mdl.likelihood_from_image(data3dr, vs_mmr, 0)
lik2 = mdl.likelihood_from_image(data3dr, vs_mmr, 1)
if | np.isscalar(likelihood_ratio) | numpy.isscalar |
import matplotlib.pyplot as plt
import numpy as np
import pickle
from datetime import date
# Load the precomputed marine-heatwave (MHW) detection results.
# NOTE(review): pickle is only safe for trusted, locally produced files --
# confirm 'mhws_data.pkl' is generated by this project.
with open('mhws_data.pkl', 'rb') as f:
    [dates, t, sst, mhws, clim] = pickle.load(f)
# Index of the event with the largest maximum intensity.
ev = np.argmax(mhws['intensity_max'])
"""
Lyapunov module
=================
Module with the classes of multi-thread the computation of the various
`Lyapunov vectors`_ and `exponents`_. Integrate using the `Runge-Kutta method`_
defined in the :mod:`~.integrators.integrate` module.
See :cite:`lyap-KP2012` for more details on the Lyapunov vectors theoretical framework.
Module classes
--------------
* :class:`LyapunovsEstimator` to estimate the Backward and Forward Lyapunov Vectors (BLVs and FLVs) along a trajectory
* :class:`CovariantLyapunovsEstimator` to estimate the Covariant Lyapunov Vectors (CLVs) along a trajectory
.. _Lyapunov vectors: https://en.wikipedia.org/wiki/Lyapunov_vector
.. _exponents: https://en.wikipedia.org/wiki/Lyapunov_exponent
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
.. _Numba: https://numba.pydata.org/
References
----------
.. bibliography:: ../model/ref.bib
:labelprefix: LYAP-
:keyprefix: lyap-
"""
from numba import njit
import numpy as np
import qgs.integrators.integrate as integrate
from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse
import multiprocessing
class LyapunovsEstimator(object):
    """Class to compute the Forward and Backward `Lyapunov vectors`_ and `exponents`_ along a trajectory of a dynamical system

    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})

    with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
    The tangent linear model must also be provided. I.e. one must provide the linearized ODEs

    .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}

    where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
    Jacobian matrix of :math:`\\boldsymbol{f}`.
    The method used to compute the Lyapunov vectors is the one introduced by
    Benettin et al. :cite:`lyap-BGGS1980`.

    Parameters
    ----------
    num_threads: None or int, optional
        Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
        cores available. Default to `None`.
    b: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    c: None or ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    a: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    number_of_dimensions: None or int, optional
        Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
        callable :attr:`func`. Default to `None`.

    Attributes
    ----------
    num_threads: int
        Number of :class:`LyapProcess` workers (threads) to use.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    n_dim: int
        Dynamical system dimension.
    n_vec: int
        The number of Lyapunov vectors to compute.
    n_traj: int
        The number of trajectories (initial conditions) computed at the last estimation
        performed by the estimator.
    n_records: int
        The number of saved states of the last estimation performed by the estimator.
    ic: ~numpy.ndarray
        Store the estimator initial conditions.
    func: callable
        Last function :math:`\\boldsymbol{f}` used by the estimator.
    func_jac: callable
        Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
    """

    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None):
        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads
        # Default is RK4
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c
        self.ic = None
        # Integration time grids: _pretime covers [t0, tw] (transient),
        # _time covers [tw, t] (recording window).
        self._time = None
        self._pretime = None
        # Result buffers filled by compute_lyapunovs().
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0
        # Flags forwarded to the workers; _forward is +1 for FLVs, -1 for BLVs.
        self._adjoint = False
        self._forward = -1
        self._inverse = 1.
        self.func = None
        self.func_jac = None
        # Work-distribution queues shared with the LyapProcess workers.
        self._ics_queue = None
        self._lyap_queue = None
        self._processes_list = list()

    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""
        for process in self._processes_list:
            process.terminate()
            process.join()

    def start(self):
        """Start or restart the workers (threads) of the estimator.

        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """
        self.terminate()
        self._processes_list = list()
        self._ics_queue = multiprocessing.JoinableQueue()
        self._lyap_queue = multiprocessing.Queue()
        for i in range(self.num_threads):
            self._processes_list.append(LyapProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                    self._ics_queue, self._lyap_queue))
        for process in self._processes_list:
            # Daemonize so workers die with the main process.
            process.daemon = True
            process.start()

    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.

        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        self.start()

    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.

        .. _Numba: https://numba.pydata.org/

        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.

        Warnings
        --------
        This function restarts the estimator!
        """
        self.func = f
        self.func_jac = fjac
        self.start()

    def compute_lyapunovs(self, t0, tw, t, dt, mdt, ic=None, write_steps=1, n_vec=None, forward=False, adjoint=False,
                          inverse=False):
        """Estimate the Lyapunov vectors using the Benettin algorithm along a given trajectory, always integrating the said trajectory
        forward in time from `ic` at `t0` to time `t`.
        The result of the estimation can be obtained afterward by calling :meth:`get_lyapunovs`.
        If `forward` is `True`, it yields the Forward Lyapunov Vectors (FLVs) between `t0` and `tw`, otherwise, returns the Backward
        Lyapunov Vectors (BLVs) between `tw` and `t`.

        Parameters
        ----------
        t0: float
            Initial time of the time integration. Corresponds to the initial condition's `ic` time.
        tw: float
            Time at which the algorithm start to store the Lyapunov vectors. Define thus also the transient before the which the Lyapunov
            vectors are considered as having not yet converged. Must be between `t0` and `t`.
        t: float
            Final time of the time integration. Corresponds to the final condition.
        dt: float
            Timestep of the integration.
        mdt: float
            Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
        ic: None or ~numpy.ndarray(float), optional
            Initial conditions of the system. Can be a 1D or a 2D array:

            * 1D: Provide a single initial condition.
              Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
            * 2D: Provide an ensemble of initial condition.
              Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
              and where `n_traj` is the number of initial conditions.

            If `None`, use the initial conditions stored in :attr:`ic`.
            If then :attr:`ic` is `None`, use a zero initial condition.
            Default to `None`.
        forward: bool, optional
            If `True`, yield the `Forward Lyapunov Vectors` (FLVs) between `t0` and `tw`.
            If `False`, yield the `Backward Lyapunov Vectors` (BLVs) between `tw` and `t`.
            Default to `False`, i.e. Backward Lyapunov Vectors estimation.
        adjoint: bool, optional
            If true, integrate the tangent :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}` ,
            else, integrate the adjoint linear model :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}^T(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}`.
            Integrate the tangent model by default.
        inverse: bool, optional
            Whether or not to invert the Jacobian matrix
            :math:`\\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\rightarrow \\boldsymbol{\\mathrm{J}}^{-1}(t, \\boldsymbol{x})`.
            `False` by default.
        write_steps: int, optional
            Save the state of the integration in memory every `write_steps` steps. The other intermediary
            steps are lost. It determines the size of the returned objects. Default is 1.
            Set to 0 to return only the final state.
        n_vec: int, optional
            The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
        """
        if self.func is None or self.func_jac is None:
            print('No function to integrate defined!')
            return 0
        if ic is None:
            # Probe the system dimension by growing a zero vector until
            # self.func accepts it, then size self.ic from the output length.
            # NOTE(review): the bare ``except`` will also loop forever if
            # self.func raises for reasons unrelated to the input size.
            i = 1
            while True:
                self.ic = np.zeros(i)
                try:
                    x = self.func(0., self.ic)
                except:
                    i += 1
                else:
                    break

            i = len(self.func(0., self.ic))
            self.ic = np.zeros(i)
        else:
            self.ic = ic

        if len(self.ic.shape) == 1:
            # Promote a single initial condition to an ensemble of one.
            self.ic = self.ic.reshape((1, -1))

        self.n_traj = self.ic.shape[0]
        self.n_dim = self.ic.shape[1]
        if n_vec is not None:
            self.n_vec = n_vec
        else:
            self.n_vec = self.n_dim

        # Transient grid [t0, tw] and recording grid [tw, t].
        self._pretime = np.concatenate((np.arange(t0, tw, dt), np.full((1,), tw)))
        self._time = np.concatenate((np.arange(tw, t, dt), np.full((1,), t)))
        self.write_steps = write_steps

        if forward:
            self._forward = 1
        else:
            self._forward = -1

        self._adjoint = adjoint
        self._inverse = 1.
        if inverse:
            self._inverse *= -1.

        # FLVs are recorded on the pre-transient window, BLVs on the main one.
        if write_steps == 0:
            self.n_records = 1
        else:
            if not forward:
                tot = self._time[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._time[-1]:
                    self.n_records += 1
            else:
                tot = self._pretime[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._pretime[-1]:
                    self.n_records += 1

        self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
        self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
        self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))

        # Dispatch one work item per trajectory and wait for completion.
        for i in range(self.n_traj):
            self._ics_queue.put((i, self._pretime, self._time, mdt, self.ic[i], self.n_vec, self.write_steps,
                                 self._forward, self._adjoint, self._inverse))

        self._ics_queue.join()

        # Collect the results; args[0] is the trajectory index.
        for i in range(self.n_traj):
            args = self._lyap_queue.get()
            self._recorded_traj[args[0]] = args[1]
            self._recorded_exp[args[0]] = args[2]
            self._recorded_vec[args[0]] = args[3]

    def get_lyapunovs(self):
        """Returns the result of the previous Lyapunov vectors estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.
        """
        # BLVs were recorded on the main time grid, FLVs on the transient grid.
        if self._forward == -1:
            tt = self._time
        else:
            tt = self._pretime

        if self.write_steps > 0:
            if tt[::self.write_steps][-1] == tt[-1]:
                return tt[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_vec)
            else:
                # The final time was not on the subsampled grid; append it.
                return np.concatenate((tt[::self.write_steps], np.full((1,), tt[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
        else:
            return tt[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_vec)
class LyapProcess(multiprocessing.Process):
    """:class:`LyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.

    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue to which the worker ask for initial conditions and parameters input.
    lyap_queue: multiprocessing.Queue
        Queue to which the worker returns the estimation results.

    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    """

    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, lyap_queue):
        super().__init__()
        self.processID = processID
        self._ics_queue = ics_queue
        self._lyap_queue = lyap_queue
        self.func = func
        self.func_jac = func_jac
        self.a = a
        self.b = b
        self.c = c

    def run(self):
        """Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""
        # Work items are tuples produced by LyapunovsEstimator.compute_lyapunovs:
        # (traj_index, pretime, time, mdt, ic, n_vec, write_steps, forward, adjoint, inverse).
        while True:
            args = self._ics_queue.get()

            # args[7] is the forward flag: -1 -> Backward LVs, +1 -> Forward LVs.
            if args[7] == -1:
                recorded_traj, recorded_exp, recorded_vec = _compute_backward_lyap_jit(self.func, self.func_jac,
                                                                                       args[1], args[2], args[3],
                                                                                       args[4][np.newaxis, :], args[5],
                                                                                       args[6], args[8], args[9],
                                                                                       self.b, self.c, self.a)
            else:
                recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_jit(self.func, self.func_jac,
                                                                                      args[1], args[2], args[3],
                                                                                      args[4][np.newaxis, :], args[5],
                                                                                      args[6], args[8], args[9],
                                                                                      self.b, self.c, self.a)

            self._lyap_queue.put((args[0], np.squeeze(recorded_traj), np.squeeze(recorded_exp),
                                  np.squeeze(recorded_vec)))

            self._ics_queue.task_done()
@njit
def _compute_forward_lyap_jit(f, fjac, time, posttime, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Integrate the full nonlinear trajectory over [time, posttime] first,
    # then estimate the Forward Lyapunov Vectors along it.
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((time[:-1], posttime)), ic, 1, 1, b, c, a)
    recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt,
                                                                               n_vec, write_steps, adjoint, inverse, b, c, a)
    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Forward Lyapunov Vectors via the Benettin QR algorithm, propagating the
    # tangent model BACKWARD in time along the precomputed trajectory ``ttraj``:
    # first over the ``posttime`` window (convergence transient), then over the
    # ``time`` window where results are recorded.
    traj = ttraj[:, :, :len(time)]
    posttraj = ttraj[:, :, len(time)-1:]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    # Identity used as initial condition of the propagator at each macro step.
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    rposttime = reverse(posttime)
    rtime = reverse(time)
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        # Random orthonormal initial basis (Q factor of a random matrix).
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        m_exp = np.zeros((n_dim))
        # Transient backward sweep: converge the basis, nothing is recorded.
        for ti, (tt, dt) in enumerate(zip(rposttime[:-1], np.diff(rposttime))):
            y[0] = posttraj[i_traj, :, -1-ti]
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Propagate the basis through the tangent propagator and re-orthonormalize.
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        iw = -1
        # Recording backward sweep over the ``time`` window (filled from the end).
        for ti, (tt, dt) in enumerate(zip(rtime[:-1], np.diff(rtime))):
            y[0] = traj[i_traj, :, -1-ti]
            # Local exponents from the stretching factors on the R diagonal.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw -= 1
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Store the state reached at the earliest recorded time.
        recorded_exp[i_traj, :, 0] = m_exp
        recorded_traj[i_traj, :, 0] = y[0]
        recorded_vec[i_traj, :, :, 0] = q
    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_jit(f, fjac, pretime, time, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Integrate the nonlinear trajectory over the whole [pretime, time] span,
    # dropping the duplicated junction point between the two time arrays.
    full_time = np.concatenate((pretime[:-1], time))
    ttraj = integrate._integrate_runge_kutta_jit(f, full_time, ic, 1, 1, b, c, a)
    # Delegate the backward-Lyapunov estimation along that trajectory.
    return _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt,
                                           n_vec, write_steps, adjoint, inverse, b, c, a)
@njit
def _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Estimate the Backward Lyapunov Vectors along `time` by sweeping the
    # tangent linear model forward, with a QR re-orthonormalization at every
    # macro step; `pretime` is a spin-up window used only for convergence.
    # Split the precomputed trajectory; both parts share the state at pretime[-1].
    pretraj = ttraj[:, :, :len(pretime)]
    traj = ttraj[:, :, (len(pretime)-1):]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    # Identity matrix (with a leading ensemble axis) used as the initial
    # condition of the propagator in the tangent-space integrator.
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of recorded states along `time`; the window end is always kept.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        y[0] = pretraj[i_traj, :, 0]
        # Start from a random set of n_vec orthonormal vectors.
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        m_exp = np.zeros((n_dim))
        # Spin-up sweep over the pre-window: only aligns q, records nothing.
        for ti, (tt, dt) in enumerate(zip(pretime[:-1], np.diff(pretime))):
            # Micro-time grid covering one macro step [tt, tt+dt].
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Advance the base state from the precomputed trajectory, then
            # propagate the vectors one macro step and re-orthonormalize.
            y[0] = pretraj[i_traj, :, ti+1]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Recording sweep over the main window; records are filled forward.
        iw = 0
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            # Local exponent estimates from the diagonal of the R factor of
            # the previous step.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw += 1
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            y[0] = traj[i_traj, :, ti+1]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Last record slot holds the values at the end of the window.
        recorded_exp[i_traj, :, -1] = m_exp
        recorded_traj[i_traj, :, -1] = y[0]
        recorded_vec[i_traj, :, :, -1] = q
    return recorded_traj, recorded_exp, recorded_vec
class CovariantLyapunovsEstimator(object):
    """Class to compute the Covariant `Lyapunov vectors`_ (CLVs) and `exponents`_ along a trajectory of a dynamical system

    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})

    with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
    The tangent linear model must also be provided. I.e. one must provide the linearized ODEs

    .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}

    where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
    Jacobian matrix of :math:`\\boldsymbol{f}`.

    Parameters
    ----------
    num_threads: None or int, optional
        Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
        cores available. Default to `None`.
    b: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    c: None or ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    a: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    number_of_dimensions: None or int, optional
        Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
        callable :attr:`func`. Default to `None`.
    method: int, optional
        Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:

        * `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
        * `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
          (see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).

        Default to `0`, i.e. Ginelli et al. algorithm.
    noise_pert: float, optional
        Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
        near tangencies (see :cite:`lyap-KP2012`). Default to 0 (no perturbation).
        Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.

    Attributes
    ----------
    num_threads: int
        Number of :class:`LyapProcess` workers (threads) to use.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    n_dim: int
        Dynamical system dimension.
    n_vec: int
        The number of Lyapunov vectors to compute.
    n_traj: int
        The number of trajectories (initial conditions) computed at the last estimation
        performed by the estimator.
    n_records: int
        The number of saved states of the last estimation performed by the estimator.
    ic: ~numpy.ndarray
        Store the estimator initial conditions.
    func: callable
        Last function :math:`\\boldsymbol{f}` used by the estimator.
    func_jac: callable
        Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
    method: int
        Select the method used to compute the CLVs:

        * `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
        * `1`: Uses the method of the intersection of the subspaces spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
          (see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
    noise_pert: float
        Noise perturbation parameter of the diagonal of the matrix resulting from the backpropagation during the Ginelli step.
        Mainly done to avoid ill-conditioned matrices near tangencies (see :cite:`lyap-KP2012`).
        Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.

    Notes
    -----
    NOTE(review): in the code, ``a`` is built as a 2D array and ``c`` as a 1D
    array of nodes, so the 'Vector'/'Matrix' wording for ``a`` and ``c`` above
    appears swapped with respect to the usual Butcher-tableau convention; the
    wording is kept as-is for consistency with the rest of the module —
    confirm against the ``integrate`` module before changing it.
    """

    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None, noise_pert=0., method=0):
        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads

        # Default is RK4
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c

        self.noise_pert = noise_pert

        # Estimation state, filled by compute_clvs.
        self.ic = None
        self._time = None
        self._pretime = None
        self._aftertime = None
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self._recorded_bvec = None
        self._recorded_fvec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0

        self.method = method

        # Model functions and worker-communication machinery, set up later by
        # set_func / start.
        self.func = None
        self.func_jac = None
        self._ics_queue = None
        self._clv_queue = None

        self._processes_list = list()

    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""
        for process in self._processes_list:
            process.terminate()
            process.join()

    def set_noise_pert(self, noise_pert):
        """Set the noise perturbation :attr:`noise_pert` parameter.

        Parameters
        ----------
        noise_pert: float, optional
            Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
            near tangencies (see :cite:`lyap-KP2012`).
            Only apply if using the Ginelli et al. algorithm, i.e. if :attr:`method` is 0.
        """
        self.noise_pert = noise_pert
        # Restart the workers so they pick up the new parameter.
        self.start()

    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.

        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods

        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """
        # Only the provided coefficients are replaced.
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        self.start()

    def start(self):
        """Start or restart the workers (threads) of the estimator.

        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """
        self.terminate()

        self._processes_list = list()
        self._ics_queue = multiprocessing.JoinableQueue()
        self._clv_queue = multiprocessing.Queue()

        for i in range(self.num_threads):
            self._processes_list.append(ClvProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                   self._ics_queue, self._clv_queue, self.noise_pert))

        for process in self._processes_list:
            # Daemonize the workers so they die with the parent process.
            process.daemon = True
            process.start()

    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.

        .. _Numba: https://numba.pydata.org/

        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.

        Warnings
        --------
        This function restarts the estimator!
        """
        self.func = f
        self.func_jac = fjac
        self.start()

    def compute_clvs(self, t0, ta, tb, tc, dt, mdt, ic=None, write_steps=1, n_vec=None, method=None, backward_vectors=False, forward_vectors=False):
        """Estimate the Covariant Lyapunov Vectors (CLVs) along a given trajectory, always integrating the said trajectory
        forward in time from `ic` at `t0` to time `tc`. Return the CLVs between `ta` and `tb`.
        The result of the estimation can be obtained afterward by calling :meth:`get_clvs`.

        Parameters
        ----------
        t0: float
            Initial time of the time integration. Corresponds to the initial condition's `ic` time.
        ta: float
            Define the time span between `t0` and `ta` of the first part of the algorithm, which obtain the convergence to the Backward Lyapunov vectors
            (initialization of the Benettin algorithm).
        tb: float
            Define the time span between `ta` and `tb` where the Covariant Lyapunov Vectors are computed.
        tc: float
            Final time of the time integration algorithm. Define the time span between `tb` and `tc` where, depending on the value of :attr:`method`,
            the convergence to the Forward Lyapunov Vectors or to the Covariant Lyapunov Vectors (thanks to the Ginelli steps) is obtained.
        dt: float
            Timestep of the integration.
        mdt: float
            Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
        ic: None or ~numpy.ndarray(float), optional
            Initial conditions of the system. Can be a 1D or a 2D array:

            * 1D: Provide a single initial condition.
              Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
            * 2D: Provide an ensemble of initial condition.
              Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
              and where `n_traj` is the number of initial conditions.

            If `None`, use the initial conditions stored in :attr:`ic`.
            If then :attr:`ic` is `None`, use a zero initial condition.
            Default to `None`.
        write_steps: int, optional
            Save the state of the integration in memory every `write_steps` steps. The other intermediary
            steps are lost. It determines the size of the returned objects. Default is 1.
            Set to 0 to return only the final state.
        n_vec: int, optional
            The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
        method: int, optional
            Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:

            * `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
            * `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
              (see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).

            Use the Ginelli et al. algorithm if not provided.
        backward_vectors: bool, optional
            Store also the computed Backward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
            Does not store the BLVs if not provided.
        forward_vectors: bool, optional
            Store also the computed Forward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
            Does not store the FLVs if not provided.
        """
        if self.func is None or self.func_jac is None:
            print('No function to integrate defined!')
            return 0

        if ic is None:
            # Probe the system dimension by calling func with zero vectors of
            # increasing size until the (jitted) call succeeds.
            i = 1
            while True:
                self.ic = np.zeros(i)
                try:
                    # Fix: catch Exception instead of a bare `except:` so that
                    # KeyboardInterrupt/SystemExit still abort the probing loop.
                    self.func(0., self.ic)
                except Exception:
                    i += 1
                else:
                    break

            # The dimension is taken from the length of the function's output.
            i = len(self.func(0., self.ic))
            self.ic = np.zeros(i)
        else:
            self.ic = ic

        # Promote a single initial condition to an ensemble of one.
        if len(self.ic.shape) == 1:
            self.ic = self.ic.reshape((1, -1))

        self.n_traj = self.ic.shape[0]
        self.n_dim = self.ic.shape[1]
        if n_vec is not None:
            self.n_vec = n_vec
        else:
            self.n_vec = self.n_dim

        if method is not None:
            self.method = method

        # Build the three time windows, each ending exactly on its endpoint.
        self._pretime = np.concatenate((np.arange(t0, ta, dt), np.full((1,), ta)))
        self._time = np.concatenate((np.arange(ta, tb, dt), np.full((1,), tb)))
        self._aftertime = np.concatenate((np.arange(tb, tc, dt), np.full((1,), tc)))

        self.write_steps = write_steps

        # Number of recorded states along the main window (last one always kept).
        if write_steps == 0:
            self.n_records = 1
        else:
            tot = self._time[::self.write_steps]
            self.n_records = len(tot)
            if tot[-1] != self._time[-1]:
                self.n_records += 1

        self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
        self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
        self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))

        # The subspace-intersection method can additionally return BLVs/FLVs.
        if self.method == 1:
            if forward_vectors:
                self._recorded_fvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
            if backward_vectors:
                self._recorded_bvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))

        # Dispatch one task per trajectory to the worker pool and wait.
        for i in range(self.n_traj):
            self._ics_queue.put((i, self._pretime, self._time, self._aftertime, mdt, self.ic[i], self.n_vec,
                                 self.write_steps, self.method))

        self._ics_queue.join()

        # Collect the results; args[0] is the trajectory index the worker tags
        # its result with, so out-of-order arrival is fine.
        for i in range(self.n_traj):
            args = self._clv_queue.get()
            self._recorded_traj[args[0]] = args[1]
            self._recorded_exp[args[0]] = args[2]
            self._recorded_vec[args[0]] = args[3]
            if self.method == 1:
                if forward_vectors:
                    self._recorded_fvec[args[0]] = args[5]
                if backward_vectors:
                    self._recorded_bvec[args[0]] = args[4]

    def get_clvs(self):
        """Returns the result of the previous CLVs estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.
        """
        if self.write_steps > 0:
            # Rebuild the time axis of the records; append the final time if
            # the stride did not land on it.
            if self._time[::self.write_steps][-1] == self._time[-1]:
                return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_vec)
            else:
                return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
        else:
            return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_vec)

    def get_blvs(self):
        """Returns the BLVs obtained during the previous CLVs estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.

        Warnings
        --------
        The BLVs are only available if :attr:`method` is set to 1.
        """
        if self._recorded_bvec is None:
            return None

        if self.write_steps > 0:
            if self._time[::self.write_steps][-1] == self._time[-1]:
                return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_bvec)
            else:
                return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_bvec)
        else:
            return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_bvec)

    def get_flvs(self):
        """Returns the FLVs obtained during the previous CLVs estimation.

        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:

            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.

        Warnings
        --------
        The FLVs are only available if :attr:`method` is set to 1.
        """
        if self._recorded_fvec is None:
            return None

        if self.write_steps > 0:
            if self._time[::self.write_steps][-1] == self._time[-1]:
                return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_fvec)
            else:
                return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_fvec)
        else:
            return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_fvec)
class ClvProcess(multiprocessing.Process):
""":class:`CovariantLyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.
Parameters
----------
processID: int
Number identifying the worker.
func: callable
`Numba`_-jitted function to integrate assigned to the worker.
func_jac: callable
`Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
b: ~numpy.ndarray, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~numpy.ndarray, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~numpy.ndarray, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
ics_queue: multiprocessing.JoinableQueue
Queue to which the worker ask for initial conditions and parameters input.
clv_queue: multiprocessing.Queue
Queue to which the worker returns the estimation results.
Attributes
----------
processID: int
Number identifying the worker.
func: callable
`Numba`_-jitted function to integrate assigned to the worker.
func_jac: callable
`Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
b: ~numpy.ndarray
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~numpy.ndarray
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~numpy.ndarray
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
"""
def __init__(self, processID, func, func_jac, b, c, a, ics_queue, clv_queue, noise_pert):
super().__init__()
self.processID = processID
self._ics_queue = ics_queue
self._clv_queue = clv_queue
self.func = func
self.func_jac = func_jac
self.a = a
self.b = b
self.c = c
self.noise_pert = noise_pert
def run(self):
"""Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""
while True:
args = self._ics_queue.get()
method = args[8]
if method == 0:
recorded_traj, recorded_exp, recorded_vec = _compute_clv_gin_jit(self.func, self.func_jac, args[1], args[2],
args[3], args[4], args[5][np.newaxis, :],
args[6], args[7],
self.b, self.c, self.a, self.noise_pert)
self._clv_queue.put((args[0], np.squeeze(recorded_traj), np.squeeze(recorded_exp),
np.squeeze(recorded_vec)))
else:
recorded_traj, recorded_exp, recorded_vec, backward_vec, forward_vec = _compute_clv_sub_jit(self.func, self.func_jac, args[1], args[2],
args[3], args[4], args[5][np.newaxis, :],
args[7], self.b, self.c, self.a)
self._clv_queue.put((args[0], np.squeeze(recorded_traj), | np.squeeze(recorded_exp) | numpy.squeeze |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import numpy as np
import qiskit
from ddt import ddt, data as test_data, unpack
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.providers.aer import AerJob
from qiskit.providers.aer.backends.aerbackend import AerBackend
from qiskit.result import Result
from scipy import sparse
from scipy.sparse import linalg
from dc_qiskit_algorithms.ControlledStatePreparation import ControlledStatePreparationGate
logging.basicConfig(format=logging.BASIC_FORMAT, level='ERROR')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# noinspection NonAsciiCharacters
@ddt
class ControlledMottonenStatePrepTests(unittest.TestCase):
    @unpack
    @test_data(
        {'matrix': [
            [+0.1, +0.1],
            [+0.2, +0.1],
            [+0.3, +0.1],
            [+0.4, +0.1]
        ]}
    )
    def test_angle_matrix_y(self, matrix):
        """Check that the R_y angle matrix computed by the gate reconstructs the
        (absolute values of the) column-normalized input amplitudes."""
        matrix = sparse.dok_matrix(matrix)
        # Normalize each column to unit 2-norm so every column is a valid state.
        columns_norm = linalg.norm(matrix, axis=0)
        matrix = matrix.multiply(np.power(columns_norm, -1)).todense()
        abs_matrix = np.abs(matrix)
        log.info("Input Matrix (Y):\n" + str(abs_matrix))
        iso_gate = ControlledStatePreparationGate(matrix)
        angle_matrix = iso_gate._to_angle_matrix_y()
        log.info("Final Angle Matrix (Y):\n" + str(angle_matrix.todense()))
        # The recovery is done by using a pre factor of 1/2 given the definition of the R_y gate
        matrix_recovered = sparse.dok_matrix(matrix.shape)
        for col in range(angle_matrix.shape[1]):
            # Row 0 of the angle matrix splits the upper/lower halves of the
            # amplitudes; rows 1 and 2 split within each half (binary tree).
            matrix_recovered[0, col] = np.cos(0.5 * angle_matrix[0, col]) * np.cos(0.5 * angle_matrix[1, col])
            matrix_recovered[1, col] = np.cos(0.5 * angle_matrix[0, col]) * np.sin(0.5 * angle_matrix[1, col])
            matrix_recovered[2, col] = np.sin(0.5 * angle_matrix[0, col]) * np.cos(0.5 * angle_matrix[2, col])
            matrix_recovered[3, col] = np.sin(0.5 * angle_matrix[0, col]) * np.sin(0.5 * angle_matrix[2, col])
        log.info("Recovered Matrix (Y):\n" + str(matrix_recovered.multiply(columns_norm).todense()))
        self.assertAlmostEqual(np.linalg.norm(abs_matrix - matrix_recovered), 0.0, delta=1e-13)
    @unpack
    @test_data(
        {'matrix': [
            [-0.1, +0.1],
            [+0.1, +0.1],
            [-0.1, +0.1],
            [+0.1, +0.1]
        ]}
    )
    def test_angle_matrix_z(self, matrix):
        """Check that the R_z angle matrix plus global phase computed by the gate
        reconstructs the input phases up to a per-column global phase."""
        matrix = sparse.dok_matrix(matrix)
        # Normalize each column to unit 2-norm so every column is a valid state.
        columns_norm = linalg.norm(matrix, axis=0)
        matrix = matrix.multiply(np.power(columns_norm, -1)).todense()
        phase_matrix = np.angle(matrix)
        log.info("Input Phase Matrix (Z):\n" + str(phase_matrix))
        iso_gate = ControlledStatePreparationGate(matrix)
        angle_matrix, global_phase = iso_gate._to_angle_matrix_z()
        # Stack the global phase on top: row 0 carries it, rows 1+ the R_z angles.
        angle_matrix = sparse.vstack([global_phase, angle_matrix]).todok()
        log.info("Final Angle Matrix (Z):\n" + str(angle_matrix.todense()))
        # The recovery is done by using a pre factor of 1/2 given the definition of the R_z gate
        matrix_recovered = sparse.dok_matrix(matrix.shape)
        for col in range(angle_matrix.shape[1]):
            matrix_recovered[0, col] = np.angle(np.exp(1.0j * angle_matrix[0, col]) * np.exp(-0.5j * angle_matrix[1, col]) * np.exp(-0.5j * angle_matrix[2, col]))
            matrix_recovered[1, col] = np.angle(np.exp(1.0j * angle_matrix[0, col]) * np.exp(-0.5j * angle_matrix[1, col]) * np.exp(+0.5j * angle_matrix[2, col]))
            matrix_recovered[2, col] = np.angle(np.exp(1.0j * angle_matrix[0, col]) * np.exp(+0.5j * angle_matrix[1, col]) * np.exp(-0.5j * angle_matrix[3, col]))
            matrix_recovered[3, col] = np.angle(np.exp(1.0j * angle_matrix[0, col]) * np.exp(+0.5j * angle_matrix[1, col]) * np.exp(+0.5j * angle_matrix[3, col]))
        # Any residual difference must be a column-wise global phase, i.e. all
        # rows of the difference are equal -> np.unique collapses them to one row.
        global_phases = np.unique(phase_matrix - matrix_recovered.todense(), axis=0)
        self.assertEqual(global_phases.shape, (1, matrix_recovered.shape[1]))
        matrix_recovered_1 = global_phases + matrix_recovered
        log.info("Recovered Matrix (Z):\n" + str(matrix_recovered_1))
        self.assertAlmostEqual(np.linalg.norm(phase_matrix - matrix_recovered_1), 0.0, delta=1e-13)
@unpack
@test_data(
{'matrix': [
[+0.1, +0.1],
[+0.2, +0.1],
[+0.3, +0.1],
[+0.4, +0.1]
]},
{'matrix': [
[-0.1, +0.1],
[+0.1, -0.1],
[+0.1, +0.1],
[+0.1, -0.1]
]},
{'matrix': [
[+0.5, +0.1, +0.5, +0.1],
[+0.1, +0.1, +0.5, +0.0],
[+0.3, +0.1, +0.1, +0.1],
[+0.1, +0.1, +0.0, +0.0]
]},
{'matrix': np.asarray([
[+0.5, +0.1, +0.1, +0.0],
[+0.1, -0.4, +0.1, +0.0],
[+0.2, +0.4, +0.1, +1.0],
[+0.1, +0.1, +0.7, +0.0]
])}
)
def test_define(self, matrix):
log.info("STARTING TEST")
matrix = sparse.dok_matrix(matrix)
columns_norm = linalg.norm(matrix, axis=0)
matrix_normed: np.matrix = matrix.multiply(np.power(columns_norm, -1)).todense()
target_qubits = int(np.ceil(np.log2(matrix.shape[0])))
control_qubits = int(np.ceil(np.log2(matrix.shape[1])))
# We can compute the expected state vector by assuming we use an equal superposition (hadamard) on the
# control qubits. We need to not only use the normed matrix, but also factor in the Hadamard factors 2^{-n/2}
theoretical_state_vector: np.ndarray = np.asarray(matrix_normed.ravel(order='F')).reshape(-1) * np.power(2, -control_qubits / 2)
log.info(f"Expected State: {theoretical_state_vector.tolist()}")
ctrl_qb = QuantumRegister(control_qubits, name='ctrl')
tgt_qb = QuantumRegister(target_qubits, name='tgt')
qc = QuantumCircuit(tgt_qb, ctrl_qb)
# The numbering is LSB on the left / MSB on the right. This creates unexpected results if not taken into account
qc.h(ctrl_qb)
qc.append(ControlledStatePreparationGate(matrix_normed), list(ctrl_qb) + list(reversed(tgt_qb)))
# The the resulting state vector from the state vector simulator
backend: AerBackend = qiskit.Aer.get_backend('statevector_simulator')
job: AerJob = qiskit.execute(qc, backend)
result: Result = job.result()
vector: np.ndarray = result.get_statevector()
# Computing the test:
# The extraced state from the simulation is allowed to be off by a common (global) phase
# If this is the case, taking the angle difference and correcting it, should give the same vector
correction = np.angle(theoretical_state_vector[0]) - np.angle(vector[0])
vector_phase_corrected = vector * np.exp(1.0j * correction)
log.info(f"Actual State: {theoretical_state_vector.tolist()}")
diff = vector_phase_corrected - theoretical_state_vector
self.assertAlmostEqual(np.linalg.norm(diff), 0.0, places=13)
if log.level == logging.DEBUG:
basic_qc = qiskit.transpile(qc, optimization_level=0, basis_gates=['uni_rot_rz', 'uni_rot_ry', 'state_prep_möttönen', 'h'])
log.debug(f"\n{basic_qc.draw(fold=-1)}")
basic_qc = qiskit.transpile(qc, optimization_level=0, basis_gates=['rz', 'cp', 'cx', 'ry', 'p', 'h'])
log.debug(f"\n{basic_qc.draw(fold=-1)}")
basic_qc = qiskit.transpile(qc, optimization_level=3, basis_gates=['u3', 'u2', 'u1', 'cx'])
log.debug(f"\n{basic_qc.draw(fold=-1)}")
log.debug('Theoretical result:')
log.debug(np.round(theoretical_state_vector, decimals=4).tolist())
log.debug('Absolute:')
log.debug(np.round(np.abs(vector), decimals=4).tolist())
log.debug(np.round(np.abs(theoretical_state_vector), decimals=4).tolist())
log.debug('Angle:')
corrected_angle_vector = correction + np.angle(vector)
corrected_angle_vector = np.fmod(corrected_angle_vector, 2*np.pi)
log.debug(np.round(corrected_angle_vector, decimals=4).tolist())
log.debug(np.round(np.angle(theoretical_state_vector), decimals=4).tolist())
log.debug('TEST:')
angle_diff = corrected_angle_vector - np.angle(theoretical_state_vector)
abs_diff = | np.abs(vector) | numpy.abs |
"""
lorentz
-------
helper functions for the lorentz notebook.
"""
#---------------- Importing relevant modules ----------------
import numpy as np # For arrays.
import matplotlib.pyplot as plt # For plotting.
import matplotlib.animation as animation # For animations.
from IPython.display import HTML # For displaying in jupyter.
import pandas as pd # For reading and saving labeled data.
from numpy import genfromtxt # For reading data from file.
#---------------- Implemented functions ---------------------
def findnearest(array, value):
    """Return the element of *array* that lies closest to *value*.

    Parameters
    ----------
    array : numpy array
        Array in which to look for the closest element.
    value : float
        Target value to match.

    Returns
    -------
    float
        The entry of ``array`` with the smallest absolute distance to
        ``value``.
    """
    distances = np.abs(array - value)
    return array[distances.argmin()]
def plot_empty_space():
    """Display an empty spacetime diagram representing empty space."""
    time = genfromtxt('data/lz_time.csv', delimiter=',')
    space = genfromtxt('data/lz_space.csv', delimiter=',')
    fig, ax = plt.subplots(figsize=(10, 7))
    # Zero linewidth: nothing is actually drawn, so only the labelled,
    # empty "playground" region appears.
    ax.plot(space, time, linewidth=0, label='Playground')
    ax.legend()
    plt.show()
def plot_light_cones():
    """Plots light cones with labels for different regions of spacetime.

    The two red lines are the light-cone boundaries of an event at the
    origin; annotations name the causal future, the causal past and the
    two acausal (space-like separated) regions.  Axis and boundary-line
    data are read from the ``data`` directory.
    """
    # ``time`` is loaded for consistency with the other plots but is not
    # used in this figure.
    time = genfromtxt('data/lz_time.csv', delimiter=',')
    space = genfromtxt('data/lz_space.csv', delimiter=',')
    line1 = genfromtxt('data/lz_line1.csv', delimiter=',')
    line2 = genfromtxt('data/lz_line2.csv', delimiter=',')
    fig, ax = plt.subplots(figsize =(10,7))
    # Light-cone boundaries.
    plt.plot(space,line1,linewidth=1,color='red')
    plt.plot(space,line2,linewidth=1,color='red')
    plt.xlim(-20,20)
    plt.ylim(-10,20)
    # Region labels; ``xytext`` positions are in axes-fraction coordinates.
    plt.annotate(' Causal Future',(-5,10),
                xytext=(0.5, 0.9), textcoords='axes fraction',
                fontsize=16,
                horizontalalignment='center', verticalalignment='top')
    plt.annotate('Causal Past',(-5,10),
                xytext=(0.5, 0.1), textcoords='axes fraction',
                fontsize=16,
                horizontalalignment='center', verticalalignment='top')
    plt.annotate('Acausal region',(0,10),
                xytext=(0.8, 0.4), textcoords='axes fraction',
                fontsize=16,
                horizontalalignment='center', verticalalignment='top')
    plt.annotate('Acausal region',(0,10),
                xytext=(0.2, 0.4), textcoords='axes fraction',
                fontsize=16,
                horizontalalignment='center', verticalalignment='top')
    plt.title('Light Cones')
    plt.show()
def plot_event_at_origin():
    """Plots an event at the origin of a set of light cones.

    Draws the light-cone boundary lines (red) and marks a single event
    at the spacetime origin.  Axis and boundary-line data are read from
    the ``data`` directory.
    """
    # ``time`` is loaded for consistency with the other plots but is not
    # used in this figure.
    time = genfromtxt('data/lz_time.csv', delimiter=',')
    space = genfromtxt('data/lz_space.csv', delimiter=',')
    line1 = genfromtxt('data/lz_line1.csv', delimiter=',')
    line2 = genfromtxt('data/lz_line2.csv', delimiter=',')
    fig, ax = plt.subplots(figsize =(10,7))
    plt.plot(space,line1,linewidth=1,color='red')
    plt.plot(space,line2,linewidth=1,color='red')
    plt.xlim(-20,20)
    plt.ylim(-2,20)
    # The event itself: one marker at the origin.
    plt.plot([0], [0], 'o')
    plt.title('Transform of an event at the origin')
    plt.show()
def plot_flashing_lighthouse():
    """Plots the sequence of lights flashing at a lighthouse.

    The flash events (``line3``/``line4``) are drawn as markers inside
    the light cones of the origin.  All data is read from the ``data``
    directory.
    """
    # ``time`` is loaded for consistency with the other plots but is not
    # used in this figure.
    time = genfromtxt('data/lz_time.csv', delimiter=',')
    space = genfromtxt('data/lz_space.csv', delimiter=',')
    line1 = genfromtxt('data/lz_line1.csv', delimiter=',')
    line2 = genfromtxt('data/lz_line2.csv', delimiter=',')
    line3 = genfromtxt('data/lz_line3.csv', delimiter=',')
    line4 = genfromtxt('data/lz_line4.csv', delimiter=',')
    fig, ax = plt.subplots(figsize =(10,7))
    # Light-cone boundaries.
    plt.plot(space,line1,linewidth=1,color='red')
    plt.plot(space,line2,linewidth=1,color='red')
    plt.xlim(-20,20)
    plt.ylim(-2,20)
    # Flash events: (space, time) pairs.
    plt.plot(line3, line4, 'o')
    plt.title('Flashing lighthouse at the origin')
    plt.show()
def lorentz(v):
    """Defines the Lorentz transformation as a 2x2 matrix.

    Parameters
    ----------
    v : float
        Velocity (in units of the speed of light) for which we want the
        Lorentz transform; must satisfy ``|v| < 1``.

    Returns
    -------
    np.ndarray
        The 2x2 boost matrix ``[[gamma, -gamma*v], [-gamma*v, gamma]]``
        with ``gamma = 1/sqrt(1 - v**2)``.

    Raises
    ------
    ValueError
        If ``|v| >= 1`` (gamma would be infinite or imaginary; previously
        this silently produced inf/nan entries).
    """
    if abs(v) >= 1:
        raise ValueError("speed must satisfy |v| < 1 (in units of c)")
    gamma = 1.0 / np.sqrt(1.0 - v * v)
    return np.array([[gamma, -gamma * v], [-gamma * v, gamma]])
def plot_lighthouse_transform():
    """Plots a transformed perspective of a lighthouse.

    Shows the lighthouse flash events both in the rest frame (green
    markers, from ``line3``/``line4``) and Lorentz-transformed into the
    frame of an observer moving with velocity ~0.8c (markers from the
    pre-computed tables ``line5``/``line6``).
    """
    # ``time`` is loaded for consistency with the other plots but is not
    # used in this figure.
    time = genfromtxt('data/lz_time.csv', delimiter=',')
    space = genfromtxt('data/lz_space.csv', delimiter=',')
    line1 = genfromtxt('data/lz_line1.csv', delimiter=',')
    line2 = genfromtxt('data/lz_line2.csv', delimiter=',')
    line3 = genfromtxt('data/lz_line3.csv', delimiter=',')
    line4 = genfromtxt('data/lz_line4.csv', delimiter=',')
    line5 = pd.read_hdf('data/lz_line5.hdf', 'line5')
    line6 = pd.read_hdf('data/lz_line6.hdf', 'line6')
    # Columns of line5/line6 are indexed by velocity; pick the column
    # closest to v = 0.8.
    line5 = line5[findnearest(line5.columns,0.8)]
    line6 = line6[findnearest(line6.columns,0.8)]
    fig, ax = plt.subplots(figsize =(10,7))
    plt.plot(space,line1,linewidth=1,color='red')
    plt.plot(space,line2,linewidth=1,color='red')
    plt.xlim(-20,20)
    plt.ylim(-2,20)
    # Transformed events (default colour) vs. rest-frame events (green).
    plt.plot(line6, line5, 'o')
    plt.plot(line3, line4, 'o',color='green')
    plt.title('Flashing lighthouse at the origin - moving observer')
    plt.show()
def animation_lorentz_1():
    """Creates an animation showing how regularly spaced events move through space
    for a moving observer.

    Returns
    -------
    HTML-animation
        Animation of regularly spaced events being transformed by the Lorentz transform.
    """
    time=np.linspace(-6,20,100)
    space=np.linspace(-20,20,100)
    # Light-cone boundary lines.
    line1=np.linspace(-20,20,100)
    line2=np.linspace(20,-20,100)
    # Rest-frame events: 11 flashes at the origin, times 0..10.
    line3=np.zeros(11)
    line4=np.linspace(0,10,11)
    # Pre-computed transformed events, one column per observer velocity.
    line5 = pd.read_hdf('data/lz_line5.hdf', 'line5')
    line6 = pd.read_hdf('data/lz_line6.hdf', 'line6')
    def datagen(u=1.05):
        """Generates the velocities used for the run function in the animation.

        Parameters
        ----------
        u : float, optional
            The initial velocity value.

        Yields
        ------
        float
            Successive velocity values used in the animation (1.0 down to -1.0
            in steps of 0.05).
        """
        while u > -1:
            u -= 0.05
            yield u
    def init():
        """Initial frame of the animation: light cones and rest-frame events."""
        l1.set_data(space,line1)
        l2.set_data(space,line2)
        l4.set_data(line3, line4)
        ax.set_xlim(-20,20)
        ax.set_ylim(-2,20)
    def run(u):
        """Updates successive frames in the animation.

        Parameters
        ----------
        u : float
            Velocity for which the events are being transformed in each frame.

        Returns
        -------
        matplotlib line
            This line has its data updated in each frame.
        """
        # Pick the pre-computed column whose velocity is closest to u.
        l3.set_data(line6[findnearest(line6.columns, u)], line5[findnearest(line5.columns, u)])
        text.set_text('$u$ = {:.2f}c'.format(u))
        return l3
    fig, ax = plt.subplots(figsize =(10,7))
    ax.set_xlabel('distance')
    ax.set_ylabel('time')
    # Empty artists; init()/run() fill them with data.
    l1, = ax.plot([], [], lw=1,color='red')
    l2, = ax.plot([], [], lw=1,color='red')
    l3, = ax.plot([], [], 'o', color = 'blue')
    l4, = ax.plot([], [], 'o', color = 'green')
    text = plt.text(10,3,'$u$ = {:.2f}'.format(0.1), size = 20)
    ani = animation.FuncAnimation(fig, run, datagen, blit=False, interval=100,
                              repeat=True, init_func=init)
    return HTML(ani.to_jshtml())
def animation_with_hyperbolae():
"""Creates an animation showing how regularly spaced events move
through space for a moving observer with hyperbolae.
Returns
-------
HTML-animation
        Animation of regularly spaced events being transformed and moved along hyperbolae.
"""
time=np.linspace(-6,20,100)
space= | np.linspace(-20,20,100) | numpy.linspace |
import matplotlib.pyplot as plt
import numpy as np
from MLG import imagepath, paperpath, path
from imageio import imread
import matplotlib.cbook as cbook
from MLG.utils import color_own
from matplotlib import rc
__all__ = ['dark','Image_Window','Image_precision','Image_Illustration','Image_Illustration_Multi','Image_compare_micro','Image_astroshift', 'create_all_Image']
def dark(onof = 0):
	'''Switch matplotlib between the dark and the default style.

	Input:
		onof: 'on'/'off' string or a boolean/int.  'on' or a value equal
		      to True activates the dark background style; anything else
		      (including 'off') restores the default style.
	'''
	# Bug fix: the original compared with ``is`` against string literals
	# ('on'/'off'), which relies on CPython string interning and raises a
	# SyntaxWarning on modern Python.  Compare by value instead; the
	# accepted inputs and resulting styles are unchanged.
	if onof == 'on' or onof == True:
		plt.style.use('dark_background')
	else:
		plt.style.use('default')
def Image_Window(string = 'resolve_Window', pres = False):
	'''------------------------------------------------------------
	Description:
		Illustrates Gaia readout windows: a 12 x 12 pixel grid around
		the target star (0.059" along-scan x 0.177" across-scan per
		pixel, see the axis annotations) plus a second, shifted grid
		for a nearby star, with the surrounding stars marked.
	---------------------------------------------------------------
	Input:
		string: (str) base file name of the created image(s).
		pres:	(bool) if True, also save the intermediate figures
			(suffixes _1 .. _3) for presentation slides.
	---------------------------------------------------------------
	Output:
		None; saves <imagepath>/<string>.png (and a copy to paperpath
		if that is configured).
	------------------------------------------------------------'''
	# Use brighter foreground colours when the dark style is active.
	dark= 'black' in plt.rcParams['savefig.facecolor']
	if dark:
		string = 'dark_'+string
		black = color_own([0.7,0.7,0.7,1])
	else: black = color_own([0.,0.,0.,1])
	c1 = color_own([0,1,1,1])
	c2 = color_own([1,0,0,1])
	c3 = color_own([1,1,0.2,1])
	c4 = color_own([0.4,0.4,0.4,1])
	c_star = [c1,c2,c3,c4]
	c_grid1 = color_own([0,int(dark),1,1])
	c_grid2 = color_own([0.5,1,0,1])
	# Star positions (AL, AC) in arcsec; index 0 is the window's target.
	star= np.array([[0, 0],[0.1,0.9],[-1,-1.1],[-0.5,0.1]])
	fig = plt.figure(figsize = [12,12])
	# Pixel sizes in arcsec: 0.059 along scan, 0.177 across scan.
	x_width = 0.059
	y_width = 0.177
	#------------------------------------------------------------
	# axis
	plt.xticks( fontsize = 25)
	plt.yticks( fontsize = 25)
	plt.grid(True)
	plt.axis('equal')
	plt.axis([-1.5,1.5,-1.4,1.6])
	plt.ylabel('Across-scan direction (AC) [arcsec]', fontsize = 30)
	plt.xlabel('Along-scan direction (AL) [arcsec]', fontsize = 30)
	#------------------------------------------------------------
	#------------------------------------------------------------
	#Grid Major Star
	# 12x12-pixel window centred on the target star.
	for i in range(-6,7):
		plt.plot([-6*x_width,6*x_width], [i*y_width,i*y_width], c = c_grid1,linewidth = 3)
		plt.plot([i*x_width,i*x_width], [-6*y_width,6*y_width], c = c_grid1, linewidth = 3)
	plt.text(0,1.4,"Along-scan direction\n $12\,\mathrm{pix} \\times 0.059 \mathrm{''/pix} = 0.708\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 0)
	plt.text(0.7,0,"Across-scan direction\n $12\,\mathrm{pix} \\times 0.177 \mathrm{''/pix} = 2.124\mathrm{''}$",fontsize = 25, verticalalignment = 'center', horizontalalignment = 'center', rotation = 90)
	# Double-headed arrows marking the window extent in AL and AC.
	plt.arrow(0,6*y_width+2*x_width, -6*x_width+0.02,0,color= black, head_width=0.1,\
		overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
	plt.arrow(0,6*y_width+2*x_width, 6*x_width-0.02,0,color= black, head_width=0.1,\
		overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
	plt.arrow(8*x_width,0,0, -6*y_width+0.02,color= black, head_width=0.1,\
		overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
	plt.arrow(8*x_width,0,0, 6*y_width-0.02,color= black, head_width=0.1,\
		overhang = 0.5, length_includes_head=True ,zorder = 10, linewidth = 3)
	plt.scatter(star[:1,0], star[:1,1], marker=(5, 1),c = c_star[:1], s = [3000], zorder = 1000)
	if pres: fig.savefig(imagepath + string + '_1.png', format = 'png')
	#------------------------------------------------------------
	#------------------------------------------------------------
	#Grid Minor Star
	# Two fainter stars inside the target window, then a second window
	# grid around the third star.
	plt.scatter(star[1:3,0], star[1:3,1], marker=(5, 1),c = c_star[1:3], s = [2000,2000], zorder = 1000)
	if pres: fig.savefig(imagepath + string + '_2.png', format = 'png')
	for i in range(-5,8):
		plt.plot([-15*x_width,-6*x_width], [i*y_width,i*y_width], c = c_grid2,linewidth = 3, zorder = -1)
	for i in range(-15,-5):
		plt.plot([i*x_width,i*x_width], [-5*y_width,7*y_width], c = c_grid2, linewidth = 3, zorder = -1)
	plt.scatter(star[3:,0], star[3:,1], marker=(5, 1),c = c_star[3:], s = [2000], zorder = 1000)
	if pres: fig.savefig(imagepath + string + '_3.png', format = 'png')
	#------------------------------------------------------------
	fig.savefig(imagepath + string + '.png', format = 'png')
	print('Create Image: '+ imagepath+ string + '.png')
	if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
	plt.close(fig)
def Image_precision(string = 'Sig_vs_Gmag', Gaia_precision = path+'InputTable/resolution_Gaia.png', pres = False):
	'''------------------------------------------------------------
	Description:
		Plots the adopted along-scan (AL) astrometric standard
		deviation versus G magnitude on top of a background image of
		the published Gaia DR2 per-CCD precision, together with the
		predicted end-of-mission parallax error.
	---------------------------------------------------------------
	Input:
		string:		(str) base file name of the created image(s).
		Gaia_precision:	(str) path to the background image with the
				Gaia DR2 precision curves.
		pres:		(bool) if True, also save intermediate
				figures (suffixes _1 .. _3).
	---------------------------------------------------------------
	Output:
		None; saves <imagepath>/<string>.png (and a copy to paperpath
		if that is configured).
	------------------------------------------------------------'''
	# Use brighter foreground colours when the dark style is active.
	dark= 'black' in plt.rcParams['savefig.facecolor']
	if dark:
		string = 'dark_'+string
		black = color_own([0.7,0.7,0.7,1])
		color1 = color_own([0.85,0,0,1])
		color2 = color_own([0,0,1,1])
		color3 = color_own([0,1,1,1])
		color4 = color_own([0.5,1,0,1])
		color5 = color_own([1,1,0,1])
	else:
		black = color_own([0.,0.,0.,1])
		color1 = color_own([0.85,0,0,1])
		color2 = color_own([0,0,1,1])
		color3 = color_own([0,1,1,1])
		color4 = color_own([0,1,0,1])
		color5 = color_own([1,1,0,1])
	fig = plt.figure(figsize = [12,10])
	Gmag = np.arange(4,22,0.01)
	datafile = cbook.get_sample_data(Gaia_precision)
	img = imread(datafile)
	# Magnitude-dependent flux factors, clipped at the bright end
	# (G = 14 resp. G = 12); presumably the standard Gaia error model
	# parameterisation -- confirm against the DR2 astrometry papers.
	z = 10 ** (0.4 * (np.maximum(Gmag, 14) - 15)) #(14-np.minimum(Gmag, 14))
	z2 = 10 ** (0.4 * (np.maximum(Gmag, 12) - 15))
	# Parallax error [mas] and adopted per-CCD AL precision [mas].
	sig_pi = (-1.631 + 680.766 * z2 + 32.732 * z2**2)**0.5/1000
	sig_fov2 =(-1.631 + 680.766 * z + 32.732 * z**2)**0.5/1000 *7.75 +0.1
	# Precision for 9 combined CCD observations (1/sqrt(9) scaling).
	sig_fov3 = sig_fov2 / np.sqrt(9)
	# Dummy off-screen lines so the background curves get legend entries.
	plt.plot([0,1],[-5,-5], c = color1, linewidth = 3, label = 'formal precision from Gaia DR2 (per CCD)' )
	plt.plot([0,1],[-5,-5], c = color2, linewidth = 3, label = 'actual precision from Gaia DR2 (per CCD)' )
	# y axis is log10(mas) but labelled with the plain mas values.
	plt.yticks([np.log10(i) for i in [20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02, 0.01]],[20,10, 5,2,1, 0.5,0.2,0.1, 0.05,0.02,0.01], fontsize = 25)
	plt.xticks( fontsize = 25)
	plt.ylabel('Standard deviation of AL field angle [mas]', fontsize = 30)
	plt.xlabel('G magnitude', fontsize = 30)
	plt.imshow(img, zorder=0, extent=[5, 21.04, np.log10(0.0195),np.log10(10)])
	plt.axis('auto')
	plt.xlim([4,22])
	plt.ylim([np.log10(0.005),np.log10(40)])
	if pres:
		plt.legend(loc = 'upper left',fontsize = 20)
		fig.savefig(imagepath + string + '_1.png', format = 'png')
	plt.plot(Gmag,np.log10(sig_pi), '--',c = color3, dashes =(5,5), linewidth = 3, label= 'predicted end-of-mission parallax error')
	if pres:
		plt.legend(loc = 'upper left',fontsize = 20)
		fig.savefig(imagepath + string + '_2.png', format = 'png')
	plt.plot(Gmag,np.log10(sig_fov2), ':' , c = color4, linewidth = 5, label= 'used Standard deviation (per CCD)' )
	if pres:
		plt.legend(loc = 'upper left',fontsize = 20)
		fig.savefig(imagepath + string + '_3.png', format = 'png')
	plt.plot(Gmag,np.log10(sig_fov3) ,c = color5,linewidth = 7, label= 'used Standard deviation for 9 CCD observations' )
	# Grey frame marking the extent of the background image.
	plt.plot([5, 21.04, 21.04,5,5], [np.log10(0.0195),np.log10(0.0195),np.log10(10),np.log10(10),np.log10(0.0195)], linewidth = 2, color = [0.5,0.5,0.5,1], zorder = 0.1)
	plt.axis('auto')
	plt.xlim([4,22])
	plt.ylim([np.log10(0.005),np.log10(40)])
	plt.legend(loc = 'upper left',fontsize = 20)
	fig.savefig(imagepath + string + '.png', format = 'png')
	print('Create Image: '+ imagepath+ string + '.png')
	if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
	plt.close(fig)
def Image_Illustration(string = 'Illustration'):
	'''------------------------------------------------------------
	Description:
		Illustration of an astrometric microlensing event: the path
		of a foreground lens, the unlensed and lensed paths of a
		background star, and the along-scan separations measured at
		six Gaia-like scan epochs with different scan directions.
	---------------------------------------------------------------
	Input:
		string: (str) base file name of the created image.
	---------------------------------------------------------------
	Output:
		None; saves <imagepath>/<string>.png (and a copy to paperpath
		if that is configured).
	------------------------------------------------------------'''
	# Use brighter foreground colours when the dark style is active.
	dark= 'black' in plt.rcParams['savefig.facecolor']
	if dark:
		string = 'dark_'+string
		black = color_own([0.7,0.7,0.7,1])
		color1 = color_own([0,1,1,1])
		color2 = color_own([1,0.5,0,1])
		color3 = color_own([0.5,1,0,1])
		color4 = color_own([1,0,1,1])
		color5 = color_own([0,1,1,1])
		color6 = color_own([0,1,1,1])
	else:
		black = color_own([0.,0.,0.,1])
		color1 = color_own([0,0,1,1])
		color2 = color_own([1,0.5,0,1])
		color3 = color_own([0,1,0,1])
		color4 = color_own([1,0,1,1])
		color5 = color_own([0,1,1,1])
		color6 = color_own([0,1,1,1])
	# Epoch indices into the 100-sample tracks and the scan direction
	# (position angle) at each epoch.
	t = np.array([12, 35, 41, 61, 73, 89])
	scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
	# Lens track: linear motion plus a parallax-like wiggle.
	x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
	y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
	# Unlensed source track (straight line).
	x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
	y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
	# Lensed (shifted) source position; TE sets the strength of the shift.
	d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
	TE = 1.5
	X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
	Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
	# Separations lens -> lensed source (upper case) and lens ->
	# unlensed source (lower case).
	dX2 = x1-X2
	dY2 = y1-Y2
	dx2 = x1-x2
	dy2 = y1-y2
	fig = plt.figure(figsize= (12,8))
	ax = plt.subplot(111)
	ax.axis('equal')
	ax.axis('off')
	for i in range(len(t)):
		# End points of the scan-direction lines through lens and source.
		xm1 =np.array([-1,1]) * np.cos(scandir[i]) + x1[t[i]]
		ym1 =np.array([-1,1]) * np.sin(scandir[i]) + y1[t[i]]
		xm2 =np.array([-1,1]) * np.cos(scandir[i]) + X2[t[i]]
		ym2 =np.array([-1,1]) * np.sin(scandir[i]) + Y2[t[i]]
		# Separations rotated into the (AC, AL) frame of this scan.
		dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
			+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
		dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
			+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
		# Connector polylines for the AL separation (lensed and unlensed).
		ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
		ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
		ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
		tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
		if i % 2 == 0:
			# Tiny arrows give the separation segments arrow heads at
			# both ends (drawn only for every second epoch).
			plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= color1, head_width=0.2,\
				overhang = 0.5, length_includes_head=True ,zorder = 10)
			plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= color1, head_width=0.2,\
				overhang = 0.5, length_includes_head=True ,zorder = 10)
			plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= color2, head_width=0.2,\
				overhang = 0.5, length_includes_head=True ,zorder = 10)
			plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= color2, head_width=0.2,\
				overhang = 0.5, length_includes_head=True, zorder = 10)
			plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
			plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
			plt.plot(ttx2[1:3],tty2[1:3],color = color1,linewidth = 3 , linestyle= '--',dashes=(10, 10))
			plt.plot(ttX2[1:3],ttY2[1:3],color = color2, linewidth = 3,linestyle= '-')
			plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
			plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
		if i% 2 == 0:
			plt.plot(xm2,ym2, color = black, linewidth = 3,zorder = 1)
			plt.plot(xm1,ym1, color = black, linewidth = 3,zorder = 1)
		else:
			plt.plot(xm2,ym2, color = 'grey', linewidth = 2, zorder = -1)
			plt.plot(xm1,ym1, color = 'grey', linewidth = 2, zorder = -1)
	#if i ==0 :
	plt.plot(x1,y1, color = color3, linewidth = 3)
	plt.plot(x2,y2, color = color1, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
	plt.plot(X2,Y2, color = color2, linewidth = 3)
	plt.xlim([-0.5,14])
	xr = 12
	yr = 7
	# Coordinate axes (RA*cos(Dec), Dec) drawn by hand.
	plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
	plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
	plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
	plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
	plt.text(2,1.5,'Lens',color = color3, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
	plt.text(4,7.5,'Star 1',color = color2,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
	fig.savefig(imagepath + string + '.png', format = 'png')
	print('Create Image: '+ imagepath+ string + '.png')
	if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
	plt.close(fig)
def Image_Illustration2 (string = 'Illustration'):
	'''------------------------------------------------------------
	Description:
		Step-by-step variant of Image_Illustration: builds the same
		astrometric-microlensing sketch in stages and saves one
		intermediate figure per stage (suffixes _1 .. _5): lens track,
		unlensed source track, lensed track with deflection lines,
		scan-window rectangles, and the along-scan separations.
	---------------------------------------------------------------
	Input:
		string: (str) base file name of the created images.
	---------------------------------------------------------------
	Output:
		None; saves <imagepath>/<string>_1.png .. _5.png (and a copy
		of the final figure to paperpath if that is configured).
	------------------------------------------------------------'''
	dark= 'black' in plt.rcParams['savefig.facecolor']
	# NOTE(review): both branches assign identical colours (unlike the
	# other Image_* functions, where the dark branch brightens ``black``)
	# -- confirm whether a dark-mode palette was intended here.
	if dark:
		string = 'dark_'+string
		black = color_own([0.,0.,0.,1])
		grey = color_own([.5,.5,0.5,1])
		cyan = color_own([0,1,1,1])
		blue = color_own([0,0,1,1])
		lime = color_own([0.6,1.2,0,1])
		green = color_own([0,1,0,1])
		red = color_own([1,0,0,1])
		orange = color_own([1,1,0,1])
	else:
		black = color_own([0.,0.,0.,1])
		grey = color_own([.5,.5,0.5,1])
		cyan = color_own([0,1,1,1])
		blue = color_own([0,0,1,1])
		lime = color_own([0.6,1.2,0,1])
		green = color_own([0,1,0,1])
		red = color_own([1,0,0,1])
		orange = color_own([1,1,0,1])
	# Epoch indices into the 100-sample tracks and the scan direction
	# (position angle) at each epoch.
	t = np.array([12, 35, 41, 61, 73, 89])
	scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
	#Position_lens
	x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
	y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
	#unlensed Position_source
	x2 = np.linspace(5,9,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
	y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
	# Lensed source position; TE sets the strength of the deflection.
	d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
	TE = 2
	X2 = x2 + (x2-x1) * TE/(d**2 +2*TE)
	Y2 = y2 + (y2-y1) * TE/(d**2 +2*TE)
	# Separations lens -> lensed source (upper case) and lens ->
	# unlensed source (lower case).
	dX2 = x1-X2
	dY2 = y1-Y2
	dx2 = x1-x2
	dy2 = y1-y2
	fig = plt.figure(figsize= (12,8))
	ax = plt.subplot(111)
	ax.axis('equal')
	ax.axis('off')
	#---------------------------------------------------------------
	#axis
	plt.xlim([-0.5,14])
	xr = 12
	yr = 7
	plt.text(xr-0.8,0,'RA $\cdot$ cos(Dec)',verticalalignment = 'center',fontsize = 25)
	plt.text(0,yr + 0.25,'Dec',fontsize = 25, horizontalalignment = 'center', rotation = 90)
	plt.arrow(-0.025,0,xr-1,0,width = 0.05,overhang = 0.5,head_width = 0.5, head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
	plt.arrow(0,-0.025,0,yr-0.5,width = 0.05,overhang = 0.5,head_width = 0.5,head_length = 0.5,color= black, zorder = 100,length_includes_head=True)
	plt.text(2,1.5,'Lens',color = grey, fontsize = 25, horizontalalignment = 'center', rotation = 0, weight = 'bold')
	#---------------------------------------------------------------
	# Motion source
	# Stage 1: lens track only.
	plt.plot(x1,y1, color = grey, linewidth = 7)
	fig.savefig(imagepath + string + '_1.png', format = 'png')
	# Stage 2: add the unlensed source track.
	plt.text(4,7.5,'Source',color = blue,fontsize = 25, horizontalalignment = 'center', rotation = 0,weight = 'bold')
	plt.plot(x2,y2, color = cyan, linestyle= '--',dashes=(10, 5), linewidth = 3, zorder = -1)
	fig.savefig(imagepath + string + '_2.png', format = 'png')
	# Stage 3: lensed track plus deflection connectors at each epoch.
	plt.plot(X2,Y2, color = blue, linewidth = 3)
	for i in range(len(t)):
		plt.plot([x2[t[i]],X2[t[i]]],[y2[t[i]],Y2[t[i]]],':',color = black)
	fig.savefig(imagepath + string + '_3.png', format = 'png')
	# Stage 4: thin scan-window rectangles (half-width ``delta``) along
	# the scan direction at lens and lensed-source positions.
	delta = 0.05
	for i in range(len(t)):
		xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
		ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
		xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
		ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
		plt.plot(xm2,ym2, color = black, linewidth = 1,zorder = 1)
		plt.plot(xm1,ym1, color = black, linewidth = 1,zorder = 1)
	fig.savefig(imagepath + string + '_4.png', format = 'png')
	# Stage 5: along-scan separation segments (lensed vs. unlensed).
	for i in range(len(t)):
		xm1 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + x1[t[i]]
		ym1 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + y1[t[i]]
		xm2 =np.array([-1,1,1,-1,-1]) * np.cos(scandir[i]) + delta * np.array([-1,-1,1,1,-1]) * np.sin(scandir[i]) + X2[t[i]]
		ym2 =np.array([-1,1,1,-1,-1]) * np.sin(scandir[i]) - delta * np.array([-1,-1,1,1,-1]) * np.cos(scandir[i]) + Y2[t[i]]
		# Separations rotated into the (AC, AL) frame of this scan.
		dsc = ((dx2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
			+ (dy2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
		dSC = ((dX2[t[i]]).reshape(-1,1)*[np.sin(scandir[i]),np.cos(scandir[i])] \
			+ (dY2[t[i]]).reshape(-1,1) *[-np.cos(scandir[i]),np.sin(scandir[i])])[0]
		ttX2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],X2[t[i]],X2[t[i]]])
		ttY2 = np.array([0,-dSC[1]/2,dSC[1]/2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],Y2[t[i]],Y2[t[i]]])
		ttx2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.cos(scandir[i]) + ([x1[t[i]],x1[t[i]],x2[t[i]],x2[t[i]]])
		tty2 = np.array([0,-dsc[1]/2-0.2,dsc[1]/2-0.2,0]) * np.sin(scandir[i]) +([y1[t[i]],y1[t[i]],y2[t[i]],y2[t[i]]])
		if i % 2 == 0:
			# Tiny arrows give the separation segments arrow heads at
			# both ends (drawn only for every second epoch).
			plt.arrow(ttx2[2],tty2[2], 0.0001*(ttx2[2]-ttx2[1]),0.0001*(tty2[2]-tty2[1]),color= red, head_width=0.2,\
				overhang = 0.5, length_includes_head=True ,zorder = 10)
			plt.arrow(ttx2[1],tty2[1], 0.0001*(ttx2[1]-ttx2[2]),0.0001*(tty2[1]-tty2[2]),color= red, head_width=0.2,\
				overhang = 0.5, length_includes_head=True ,zorder = 10)
			plt.arrow(ttX2[2],ttY2[2], 0.0001*(ttX2[2]-ttX2[1]),0.0001*(ttY2[2]-ttY2[1]),color= orange, head_width=0.2,\
				overhang = 0.5, length_includes_head=True ,zorder = 10)
			plt.arrow(ttX2[1],ttY2[1], 0.0001*(ttX2[1]-ttX2[2]),0.0001*(ttY2[1]-ttY2[2]),color= orange, head_width=0.2,\
				overhang = 0.5, length_includes_head=True, zorder = 10)
			plt.plot(ttx2[0:2],tty2[0:2],color = black, linestyle= ':')
			plt.plot(ttX2[0:2],ttY2[0:2],color = black, linestyle= ':')
			plt.plot(ttx2[1:3],tty2[1:3],color = red,linewidth = 3 , linestyle= '--',dashes=(10, 10))
			plt.plot(ttX2[1:3],ttY2[1:3],color = orange, linewidth = 3,linestyle= '-')
			plt.plot(ttx2[2:],tty2[2:],color = black, linestyle= ':')
			plt.plot(ttX2[2:],ttY2[2:],color = black, linestyle= ':')
	#if i ==0 :
	fig.savefig(imagepath + string + '_5.png', format = 'png')
	# NOTE(review): only *_1..*_5.png are written to imagepath; the
	# printed path <string>.png is never saved there (only to paperpath)
	# -- confirm whether a final savefig is missing.
	print('Create Image: '+ imagepath+ string + '.png')
	if paperpath is not None: fig.savefig(paperpath + string + '.png', format = 'png')
	plt.close(fig)
def Image_Illustration_Multi(string = 'Illustration_Multi'):
'''------------------------------------------------------------
Description:
---------------------------------------------------------------
Input:
---------------------------------------------------------------
Output:
------------------------------------------------------------'''
dark= 'black' in plt.rcParams['savefig.facecolor']
if dark:
string = 'dark_'+string
black = color_own([0.7,0.7,0.7,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,.5,0,1])
else:
black = color_own([0.,0.,0.,1])
grey = color_own([.5,.5,0.5,1])
cyan = color_own([0,1,1,1])
blue = color_own([0,0,1,1])
lime = color_own([0.6,1.2,0,1])
green = color_own([0,1,0,1])
red = color_own([1,0,0,1])
orange = color_own([1,1,0,1])
t = np.array([12, 35, 41, 61, 73, 89])
scandir = np.array([0.1, 0.7, 0.4, 0.8 , 0.2, 0.1])*np.pi
x1 = np.linspace(1,13,100) + 0.3 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y1 = np.linspace(1,3,100) + 0.3* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x2 = np.linspace(3,7,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y2 = np.linspace(7,4.5,100)# + 0.03* np.cos(np.linspace(np.pi/4,12*np.pi/4,100))
x3 = np.linspace(12,10,100)# + 0.03 * np.sin(np.linspace(np.pi/4,12*np.pi/4,100))
y3 = | np.linspace(8,6,100) | numpy.linspace |
import pytest
def test_auto_config_get_tpot_config():
    """Base configs are strict subsets of the preprocessor-enabled ones."""
    from foreshadow.estimators.config import get_tpot_config

    with_prep_cls = get_tpot_config("classification", include_preprocessors=True)
    with_prep_reg = get_tpot_config("regression", include_preprocessors=True)
    base_cls = get_tpot_config("classification")
    base_reg = get_tpot_config("regression")

    assert set(base_cls).issubset(set(with_prep_cls))
    assert with_prep_cls != base_cls
    assert set(base_reg).issubset(set(with_prep_reg))
    assert with_prep_reg != base_reg
def test_auto_config_invalid_input():
    """An unknown problem type should raise a ValueError naming type_."""
    from foreshadow.estimators.config import get_tpot_config

    with pytest.raises(ValueError) as excinfo:
        get_tpot_config("test")

    assert "type_:" in str(excinfo.value)
def test_invalid_problem_type():
    """Constructing with an unknown problem_type should raise ValueError."""
    from foreshadow.estimators import AutoEstimator

    with pytest.raises(ValueError) as excinfo:
        AutoEstimator(problem_type="test")

    assert "problem type must be in " in str(excinfo.value)
def test_invalid_auto():
    """Constructing with an unknown auto backend should raise ValueError."""
    from foreshadow.estimators import AutoEstimator

    with pytest.raises(ValueError) as excinfo:
        AutoEstimator(auto="test")

    assert "auto must be in " in str(excinfo.value)
def test_invalid_kwargs_not_dict():
    """estimator_kwargs must be a dict; anything else raises ValueError."""
    from foreshadow.estimators import AutoEstimator

    with pytest.raises(ValueError) as excinfo:
        AutoEstimator(
            problem_type="regression", auto="tpot", estimator_kwargs="test"
        )

    assert str(excinfo.value) == "estimator_kwargs must be a valid kwarg dictionary"
@pytest.mark.skip(
    reason=(
        "auto-sklearn is a pain to install waiting on: "
        "https://github.com/automl/auto-sklearn/pull/703"
    )
)
def test_override_kwarg_dict():
    """Keyword overrides in estimator_kwargs reach the constructed estimator."""
    from foreshadow.estimators import AutoEstimator

    # NOTE: requires auto-sklearn to be installed (hence the skip above).
    estimator = AutoEstimator(
        problem_type="regression",
        auto="autosklearn",
        estimator_kwargs={"include_preprocessors": ["kitchen_sinks"]},
    )
    constructed = estimator.construct_estimator([1, 2, 3])

    assert constructed.include_preprocessors == ["kitchen_sinks"]
def test_temp():
    """Smoke test: AutoEstimator can be built twice without side effects."""
    import numpy as np
    import pandas as pd

    from foreshadow.estimators import AutoEstimator

    labels = pd.DataFrame(np.array([0] * 50 + [1] * 50))
    first = AutoEstimator()
    _ = first.construct_estimator(labels)
    _ = AutoEstimator()
@pytest.mark.skip(
    reason=(
        "auto-sklearn is a pain to install waiting on: "
        "https://github.com/automl/auto-sklearn/pull/703"
    )
)
def test_default_estimator_setup_classification():
    """Binary labels should default to an AutoSklearnClassifier."""
    import numpy as np
    import pandas as pd
    from autosklearn.classification import AutoSklearnClassifier

    from foreshadow.estimators import AutoEstimator

    labels = pd.DataFrame(np.array([0] * 50 + [1] * 50))
    chosen = AutoEstimator().construct_estimator(labels)

    assert isinstance(chosen, AutoSklearnClassifier)
def test_default_estimator_setup_classification_autosklearn_not_installed(
    mocker
):
    """Without autosklearn available, classification falls back to TPOT."""
    import numpy as np
    import pandas as pd
    from tpot import TPOTClassifier

    from foreshadow.estimators import AutoEstimator

    # Simulate autosklearn being absent so the estimator must fall back.
    mocker.patch.dict("sys.modules", {"autosklearn": None})
    labels = pd.DataFrame(np.array([0] * 50 + [1] * 50))
    auto = AutoEstimator()

    with pytest.warns(Warning) as recorded:
        chosen = auto.construct_estimator(labels)

    assert isinstance(chosen, TPOTClassifier)
    assert "is not available, defaulting to" in str(recorded[0].message)
def test_default_estimator_setup_regression():
    """Continuous targets should default to a TPOTRegressor."""
    import numpy as np
    import pandas as pd
    from tpot import TPOTRegressor

    from foreshadow.estimators import AutoEstimator

    targets = pd.DataFrame(np.random.normal(0, 1, 200))
    chosen = AutoEstimator().construct_estimator(targets)

    assert isinstance(chosen, TPOTRegressor)
@pytest.mark.skip(
    reason="Waiting on issue https://github.com/automl/auto-sklearn/issues/514"
)
@pytest.mark.slowest
def test_auto_default_to_autosklearn():
    """End-to-end autosklearn run must reproduce the recorded outputs.

    Fits a seeded AutoEstimator on a trivially separable dataset and
    compares predictions, class probabilities and the score against
    previously recorded reference values.
    """
    import random

    import numpy as np
    import pandas as pd
    from sklearn.model_selection import train_test_split

    from foreshadow.estimators import AutoEstimator

    # Seed everything so the autosklearn search is reproducible.
    seed = 0
    np.random.seed(seed)
    random.seed(seed)
    X = pd.DataFrame(np.array([0] * 50 + [1] * 50).reshape((-1, 1)))
    y = pd.DataFrame(np.array([0] * 50 + [1] * 50))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

    ae = AutoEstimator(
        problem_type="classification",
        auto="autosklearn",
        estimator_kwargs={"time_left_for_this_task": 20, "seed": seed},
    )
    ae.fit(X, y)
    ae_predict = ae.predict(X_test)
    ae_predict_proba = ae.predict_proba(X_test)
    ae_score = ae.score(X_test, y_test)
    expected_predict = np.array([0, 1, 0, 1, 1, 1, 0, 1, 1, 1])
    expected_predict_proba = np.array(
        [
            [0.8584763163857105, 0.14152368227318532],
            [0.13621543275812661, 0.8637845659007688],
            [0.8584763163857105, 0.14152368227318532],
            [0.13621543275812661, 0.8637845659007688],
            [0.13621543275812661, 0.8637845659007688],
            [0.13621543275812661, 0.8637845659007688],
            [0.8584763163857105, 0.14152368227318532],
            [0.13621543275812661, 0.8637845659007688],
            [0.1362179604041567, 0.863782038254739],
            [0.1362179604041567, 0.863782038254739],
        ]
    )
    expected_score = 1.0

    # Bug fix: a stray ``raise Exception()`` previously made these
    # assertions unreachable, and only the predictions were checked.
    assert np.allclose(ae_predict, expected_predict)
    assert np.allclose(ae_predict_proba, expected_predict_proba)
    assert np.isclose(ae_score, expected_score)
import scipy.stats
import numpy as np
def f_test(sample_x, sample_y, larger_varx_alt):
    """
    Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis.
    Parameters
    ----------
    sample_x : list
        A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
    sample_y : list
        A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
    larger_varx_alt : bool
        True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
    Returns
    -------
    f_value : float
        Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
    p_value : float
        Let F be the F-distribution with nx-1, ny-1 df (the code below uses nx-1 and ny-1, matching the
        sample variances' degrees of freedom). 1 - P(F < f_value) if larger_varx_alt = True,
        P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to
        the right. More extreme F values for ox^2 < oy^2 are to the left.
    """
    # calculate unbiased sample variances (n-1 in the denominator)
    sample_var_x = np.var(sample_x, ddof=1)
    sample_var_y = np.var(sample_y, ddof=1)
    f_value = sample_var_x / sample_var_y
    # degrees of freedom are the sample sizes minus one
    dfx = len(sample_x) - 1
    dfy = len(sample_y) - 1
    # More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 lie to the right
    # (larger quotient); more extreme values for ox^2 < oy^2 lie to the left (smaller quotient).
    if larger_varx_alt:
        # sf(x) == 1 - cdf(x), computed directly for better accuracy in the far tail.
        p_value = scipy.stats.f.sf(f_value, dfx, dfy)
    else:
        p_value = scipy.stats.f.cdf(f_value, dfx, dfy)
    return f_value, p_value
def f1_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
p_value : float
Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = | np.var(sample_x, ddof=1) | numpy.var |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pymc3 as pm
class DCA:
    """Dollar-cost-averaging plan: invest a fixed cash amount every `period` bars."""

    def __init__(self, period=30, cash=300.):
        # Number of bars between purchases and cash spent per purchase.
        self.period, self.cash = period, cash
class Investor:
    """Running state of a single simulated investor.

    :param dca: investment plan; when omitted, a fresh DCA() is created per
        instance. The previous signature used ``dca=DCA()``, which Python
        evaluates once at function-definition time, so every Investor created
        with the default silently shared one DCA object (mutable default
        argument bug).
    """

    def __init__(self, dca=None):
        self.cash = 0.       # uninvested cash remaining after share purchases
        self.invested = 0.   # total cash put into the strategy
        self.shares = 0      # total shares held
        self.returns = 0.
        self.gains = 0.
        # Build a new plan per instance instead of sharing a single default.
        self.dca = DCA() if dca is None else dca
class BuyAndHoldInvestmentStrategy:
def __init__(self, investor=Investor(), tr_cost=2.):
self.investor = investor
self.tr_cost = tr_cost
def invest(self, data, ring=None, position=None):
if len(data.keys()) == 0:
return
indexes = data.index[data.index % self.investor.dca.period == 0]
if ring is not None and position is not None:
index_selector = np.arange(len(indexes))
indexes = indexes[index_selector % ring == position]
prices = data.ix[indexes].values
prices = prices.reshape(prices.shape[0], )
invested = np.full((len(prices),), fill_value=self.investor.dca.cash)
invest_minus_tr_cost = np.subtract(invested, 2.)
shares = np.round(np.divide(invest_minus_tr_cost, prices))
real_invest = np.multiply(shares, prices)
cash = np.sum(np.subtract(invest_minus_tr_cost, real_invest))
self.investor.shares = np.sum(shares)
self.investor.invested = np.sum(invested)
try:
self.investor.gains = | np.sum(shares) | numpy.sum |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import copy
import importlib.resources
import json
import logging
import os
import platform
import random
import tempfile
import time
import cv2
import networkx as nx
import numpy as np
from ai2thor.build import arch_platform_map, build_name
from ai2thor.controller import Controller
from fuzzywuzzy import fuzz
import teach.meta_data_files.ai2thor_resources as ai2thor_resources
import teach.meta_data_files.config as config_directory
from teach.dataset.initialization import Initialization
from teach.dataset.pose import Pose
from teach.logger import create_logger
from teach.settings import get_settings
from teach.simulators.simulator_base import SimulatorBase
# Commit where FillLiquid bug is fixed: https://github.com/allenai/ai2thor/issues/844
COMMIT_ID = "fdc047690ee0ab7a91ede50d286bd387d379713a"
# debug manual flag
debug_print_all_sim_steps = False
logger = create_logger(__name__)
class TEAChController(Controller):
    """AI2-THOR controller that keeps its build artifacts under a fixed base directory."""

    def __init__(self, base_dir: str, **kwargs):
        """Ensure `base_dir` exists, remember it, and defer everything else to Controller."""
        os.makedirs(base_dir, exist_ok=True)
        self._base_dir = base_dir
        super().__init__(**kwargs)

    @staticmethod
    def build_local_executable_path(base_dir: str, commit_id: str, release_dir: str = "releases"):
        """Helper method to build the path to the local executable. Useful when executable is pre-downloaded."""
        executable_name = build_name(arch_platform_map[platform.system()], commit_id)
        return os.path.join(base_dir, release_dir, executable_name, executable_name)

    @staticmethod
    def base_dir_in_tmp():
        """Create (if needed) and return an 'ai2thor' directory under the system temp dir."""
        path = os.path.join(tempfile.gettempdir(), "ai2thor")
        os.makedirs(path, exist_ok=True)
        return path

    @property
    def base_dir(self):
        """Directory under which AI2-THOR builds are stored."""
        return self._base_dir
class SimulatorTHOR(SimulatorBase):
    def __init__(
        self,
        task_type="eqa_complex",
        comments=None,
        fps=25,
        logger_name=__name__,
        logger_level=logging.DEBUG,
        dir_out=None,
        s3_bucket_name=None,
        web_window_size=900,
        commander_embodied=False,
        visibility_distance=1.5,
    ):
        """
        Constructor for Simulator_THOR - a wrapper over AI2-THOR
        :param task_type: Type of task. This is currently user-defined. Default = 'eqa_complex'
        :type task_type: String
        :param comments: Informative comments for the entire data collection session. Default = None (use current day, time)
        :type comments: String
        :param fps: Maximum frame rate for video feed. Default = 25
        :type fps: Integer
        :param logger_name: Name of logger. Default = __name__ (name of the current module)
        :type logger_name: String
        :param logger_level: Level for logger. Default = logging.DEBUG
        :type logger_level: Enumeration. See logging.setLevel()
        :param dir_out: Output directory for logging
        :type dir_out: String
        :param s3_bucket_name: S3 bucket for logging
        :type s3_bucket_name: String
        :param web_window_size: Window/ image sizes (square) to be used by simulator; 900 for TEACh data collection
        :type web_window_size: Int
        :param commander_embodied: True if the Commander should also be allowed to interact with objects; False for
            TEACh data collection
        :type commander_embodied: Bool
        :param visibility_distance: Max distance an agent can be from an object to successfully interact with it; 1.5
            for TEACh data collection
        :type visibility_distance: Float
        """
        time_start = time.time()
        super().__init__(
            task_type,
            comments,
            fps=fps,
            logger_name=logger_name,
            logger_level=logger_level,
            dir_out=dir_out,
            s3_bucket_name=s3_bucket_name,
        )
        time_base_init = time.time()
        logger.info("Initializing simulator... time to init Simulator_base: %s sec" % (time_base_init - time_start))
        # The AI2-THOR controller is created lazily (see __launch_simulator via start_new_episode).
        self.controller = None
        teach_settings = get_settings()
        self.controller_base_dir = teach_settings.AI2THOR_BASE_DIR
        use_local_exe = teach_settings.AI2THOR_USE_LOCAL_EXE
        # Optionally point at a pre-downloaded Unity build instead of letting ai2thor download one.
        self.controller_local_executable_path = (
            TEAChController.build_local_executable_path(self.controller_base_dir, COMMIT_ID) if use_local_exe else None
        )
        # Default scene selection and interaction parameters.
        self.world_type = "Kitchen"
        self.world = None
        self.grid_size = 0.25
        self.hotspot_pixel_width = 10
        self.web_window_size = web_window_size
        self.commander_embodied = commander_embodied
        self.randomize_object_search = False
        self.visibility_distance = visibility_distance
        self.object_target_camera_idx = None
        # Navigation graph/points are built on demand (see __generate_navigation_graph usage).
        self.navigation_graph = self.navigation_points = None
        self.topdown_cam_orth_size = self.topdown_lower_left_xz = None  # Used for MapGoals
        self.floor_oid = None  # used for handoffs to temporarily store objects on the floor
        # The following is a dictionary for custom object metadata. When adding custom object properties, DO NOT use
        # property names already used by AI2-THOR. If the same property is needed here, prefix the property name with
        # the project for which you are using it. For example, the AI2-THOR property isSliced could be changed to
        # simbotIsSliced if the project simbot needed custom behaviour from isSliced
        self.__custom_object_metadata = dict()
        # Affordances by action type - identifies what properties an object must satisfy for it to be possible to take
        # an action on it; Used in highlighting valid objects in TEACh data collection interface to assist annotators
        self.action_to_affordances = {
            "Pickup": [{"pickupable": True, "isPickedUp": False}],
            "Place": [{"receptacle": True}],
            "Open": [{"openable": True, "isOpen": False}],
            "Close": [{"openable": True, "isOpen": True}],
            "ToggleOn": [{"toggleable": True, "isToggled": False}],
            "ToggleOff": [{"toggleable": True, "isToggled": True}],
            "Slice": [{"sliceable": True, "isSliced": False}],
            "Dirty": [{"dirtyable": True, "isDirty": False}],
            "Clean": [{"dirtyable": True, "isDirty": True}],
            "Fill": [{"canFillWithLiquid": True, "isFilledWithLiquid": False}],
            "Empty": [{"canFillWithLiquid": True, "isFilledWithLiquid": True}],
            "Pour": [
                {"canFillWithLiquid": True, "isFilledWithLiquid": False},
                {"objectType": "Sink"},
                {"objectType": "SinkBasin"},
                {"objectType": "Bathtub"},
                {"objectType": "BathtubBasin"},
            ],
            "Break": [{"breakable": True, "isBroken": False}],
        }
        time_end = time.time()
        logger.info("Finished initializing simulator. Total time: %s sec" % (time_end - time_start))
def set_task(self, task, task_params=None, comments=""):
"""
Set the current task to provided Task_THOR object
Tasks are defined in json files under task_definitions
:param task: instance of Task_THOR class
:param task_params list of parameters to the task, possibly empty; must match definition nparams in length
:param comments: Informative comments for the current task. Default = ''
:type comments: String
"""
logger.debug("Setting task = %s" % str(task))
new_task = copy.deepcopy(task)
if task_params is not None:
new_task.task_params = task_params
new_task.comments = comments
new_task.episodes = [] if self.current_episode is None else [self.current_episode]
self._dataset.add_task(new_task)
self.current_task = new_task
self.logger.debug("New task: %d, %s, %s, %s" % (task.task_id, task.task_name, comments, str(task.task_params)))
self.to_broadcast["info"] = {"message": ""}
logger.info("SimulatorTHOR set_task done New task: %d, %s, %s" % (task.task_id, task.task_name, comments))
def set_task_by_id(self, task_id: int, task_params=None, comments=""):
"""
Set the current task to task defined in default_definitions.json with provided task_id
:param task_id: task id number from task definition json file
:param task_params list of parameters to the task, possibly empty; must match definition nparams in length
:param comments: Informative comments for the current task. Default = ''
:type comments: String
"""
task = self._dataset.definitions.map_tasks_id2info[task_id]
task.task_params = task_params
self.set_task(task=task, task_params=task_params, comments=comments)
def set_task_by_name(self, task_name: str, task_params=None, comments=""):
"""
Set the current task to task defined in default_definitions.json with provided task_name
:param task_name task name from task definition json file
:param task_params list of parameters to the task, possibly empty; must match definition nparams in length
:param comments: Informative comments for the current task. Default = ''
:type comments: String
"""
task = self._dataset.definitions.map_tasks_name2info[task_name]
task.task_params = task_params
self.set_task(task=task, task_params=task_params, comments=comments)
def __add_obj_classes_for_objs(self):
"""
For each object in AI2-THOR metadata, update with manually defined object classes to be tracked in custom
properties
"""
# Load custom object classes
with importlib.resources.open_text(ai2thor_resources, "custom_object_classes.json") as file_handle:
custom_object_classes = json.load(file_handle)
# Assign custom classes to each object
all_objects = self.get_objects(self.controller.last_event)
for obj in all_objects:
cur_obj_classes = [obj["objectType"]]
if obj["objectType"] == "Sink":
cur_obj_classes += ["SinkBasin"]
if obj["objectType"] == "SinkBasin":
cur_obj_classes += ["Sink"]
if obj["objectType"] == "Bathtub":
cur_obj_classes += ["BathtubBasin"]
if obj["objectType"] == "BathtubBasin":
cur_obj_classes += ["Bathtub"]
if obj["objectType"] in custom_object_classes:
cur_obj_classes += custom_object_classes[obj["objectType"]]
self.__update_custom_object_metadata(obj["objectId"], "simbotObjectClass", cur_obj_classes)
    def __init_custom_object_metadata(self):
        """
        Reset custom object metadata to initial state: erase previously tracked properties, add manual classes for all
        objects and check for custom property updates from current state
        """
        # Drop everything tracked for the previous episode.
        self.__custom_object_metadata = dict()
        # Re-seed 'simbotObjectClass' for every object in the current scene.
        self.__add_obj_classes_for_objs()
        # Pick up any custom-property updates implied by the current simulator state.
        self.__check_per_step_custom_properties()
    def __check_per_step_custom_properties(self, objs_before_step=None):
        """
        Check whether any custom object properties need to be updated; Should be called after taking each action
        :param objs_before_step: output of get_objects() captured before the action was taken, or None;
            used by the helpers below to detect property transitions across the step
        """
        # Update whether things got cleaned and filled with water
        self.__update_sink_interaction_outcomes(self.controller.last_event)
        # Update whether a mug should be filled with coffee
        self.__update_custom_coffee_prop(self.controller.last_event, objs_before_step)
        # Update whether things got cooked
        self.__update_custom_property_cooked(self.controller.last_event)
        # Check for objects that are boiled at the start of the episode
        self.__update_custom_property_boiled(objs_before_step, self.controller.last_event)
def __update_custom_object_metadata(self, object_id, custom_property_name, custom_property_value):
"""
Update custom properties
"""
if object_id not in self.__custom_object_metadata:
self.__custom_object_metadata[object_id] = dict()
self.__custom_object_metadata[object_id][custom_property_name] = custom_property_value
def __append_to_custom_object_metadata_list(self, object_id, custom_property_name, custom_property_value):
"""
Add values to custom properties that are lists
"""
if object_id not in self.__custom_object_metadata:
self.__custom_object_metadata[object_id] = dict()
if custom_property_name not in self.__custom_object_metadata[object_id]:
self.__custom_object_metadata[object_id][custom_property_name] = list()
if custom_property_value not in self.__custom_object_metadata[object_id][custom_property_name]:
self.__custom_object_metadata[object_id][custom_property_name].append(custom_property_value)
def __delete_from_custom_object_metadata_list(self, object_id, custom_property_name, custom_property_value):
"""
Delete values from custom properties that are lists
"""
if (
object_id in self.__custom_object_metadata
and custom_property_name in self.__custom_object_metadata[object_id]
and custom_property_value in self.__custom_object_metadata[object_id][custom_property_name]
):
del self.__custom_object_metadata[object_id][custom_property_name][
self.__custom_object_metadata[object_id][custom_property_name].index(custom_property_value)
]
def __delete_object_from_custom_object_metadata(self, object_id):
"""
Delete custom properties of an object
:param object_id: ID of object whose properties are to be deleted
"""
if object_id in self.__custom_object_metadata:
del self.__custom_object_metadata[object_id]
for oid in self.__custom_object_metadata:
for prop in self.__custom_object_metadata[oid]:
if (
type(self.__custom_object_metadata[oid][prop]) is list
and object_id in self.__custom_object_metadata[oid][prop]
):
del self.__custom_object_metadata[oid][prop][
self.__custom_object_metadata[oid][prop].index(object_id)
]
elif object_id == self.__custom_object_metadata[oid][prop]:
self.__custom_object_metadata[oid][prop] = None
    def __transfer_custom_metadata_on_slicing_cracking(self, objects):
        """
        When objects get sliced or cracked, their object IDs change because one object may become multiple objects.
        Transfer custom properties from the original object to the new object(s)
        :param objects: Output of get_objects()
        """
        objects_to_delete = set()
        for obj in objects:
            transfer_needed = False
            orig_obj_id = None
            # Derived ids appear to extend the original id with one extra '|'-separated
            # suffix; dropping the last segment recovers the original id.
            # NOTE(review): assumes AI2-THOR's 'Sliced'/'Cracked' id format -- confirm upstream.
            if "Sliced" in obj["objectId"]:
                transfer_needed = True
                orig_obj_id = "|".join(obj["objectId"].split("|")[:-1])
            if "Cracked" in obj["objectId"]:
                transfer_needed = True
                orig_obj_id = "|".join(obj["objectId"].split("|")[:-1])
            if transfer_needed and orig_obj_id is not None and orig_obj_id in self.__custom_object_metadata:
                # Deep-copy so later edits to one derived object's metadata cannot
                # mutate entries shared with its siblings or the original.
                self.__custom_object_metadata[obj["objectId"]] = copy.deepcopy(
                    self.__custom_object_metadata[orig_obj_id]
                )
                if (
                    "simbotLastParentReceptacle" in self.__custom_object_metadata[obj["objectId"]]
                    and self.__custom_object_metadata[obj["objectId"]]["simbotLastParentReceptacle"] is not None
                ):
                    # Keep the receptacle's reverse pointer in sync with the new id.
                    poid = self.__custom_object_metadata[obj["objectId"]]["simbotLastParentReceptacle"]
                    self.__append_to_custom_object_metadata_list(poid, "simbotIsReceptacleOf", obj["objectId"])
                objects_to_delete.add(orig_obj_id)
        # Remove stale entries for the pre-slice/pre-crack objects.
        for obj_id in objects_to_delete:
            self.__delete_object_from_custom_object_metadata(obj_id)
def get_objects(self, event=None):
"""
Return objects augmented by custom properties
:param event: Simulator event to be used to obtain object properties, usually self.controller.last_event to get
current object states
"""
if event is None:
if self.commander_embodied:
event = self.controller.last_event.events[0]
else:
event = self.controller.last_event
for obj in event.metadata["objects"]:
if obj["objectId"] in self.__custom_object_metadata:
obj.update(self.__custom_object_metadata[obj["objectId"]])
return event.metadata["objects"]
def get_inventory_objects(self, event):
"""
Return objects held in hand by agents
:param event: Simulator event to be used to obtain object properties, usually self.controller.last_event to get
current object states
"""
for obj in event.metadata["inventoryObjects"]:
if obj["objectId"] in self.__custom_object_metadata:
obj.update(self.__custom_object_metadata[obj["objectId"]])
return event.metadata["inventoryObjects"]
    def start_new_episode(
        self,
        world=None,
        world_type=None,
        object_tuples=None,
        commander_embodied=None,
        episode_id=None,
        randomize_object_search=False,
    ):
        """
        Start a new episode in a random scene
        :param world: AI2-THOR floor plan to be used or None; if None a random scene (matching specified world_type
        if provided) is used
        :param world_type: One of "Kitchen", "Bedroom", "Bathroom", "Living room" or None; if world is None and
        world_type is specified, a random world of the specified world_type is used
        :param object_tuples: Used to specify initial states of objects
        :param commander_embodied: True if the Commander should also be allowed to interact with objects; False for
        TEACh data collection
        :param episode_id: Used to specify a custom episode ID
        :param randomize_object_search: If True, attempts to search for objects will return a random object of type
        matching the search string; if false, the object closest to the agent is always returned on search
        """
        logger.info("In simulator_THOR.start_new_episode, world = %s world_type = %s" % (world, world_type))
        self.randomize_object_search = randomize_object_search
        if commander_embodied is not None:
            self.commander_embodied = commander_embodied
        else:
            # Default to a disembodied commander if the caller did not specify.
            self.commander_embodied = False
            logger.info("SimulatorTHOR warning: commander_embodied was not set on first episode init; default to False")
        if world is None:
            # Pick a random scene (optionally constrained by world_type).
            world_type, world = self.select_random_world(world_type=world_type)
        super().start_new_episode(
            world=world,
            world_type=world_type,
            object_tuples=object_tuples,
            commander_embodied=commander_embodied,
            episode_id=episode_id,
            randomize_object_search=randomize_object_search,
        )
        logger.info("In SimulatorTHOR.start_new_episode, before __launch_simulator")
        self.__launch_simulator(world=world, world_type=world_type)
        logger.info("In SimulatorTHOR.start_new_episode, completed __launch_simulator")
        # Reset tracked custom properties for the fresh scene.
        self.__init_custom_object_metadata()
        # Record the initial scene state on the episode for later replay/analysis.
        state = self.get_scene_object_locs_and_states()
        self.current_episode.initial_state = Initialization(
            time_start=0,
            agents=state["agents"],
            objects=state["objects"],
            custom_object_metadata=self.__custom_object_metadata,
        )
def save(self, file_name=None):
"""
Save the session using the current state as the final simulator state. This does not shut down the simulator.
Call done() instead if simulator should be shut down after this
:param file_name: If file_name is not None, the simulator session is saved in the same format as original games
"""
# Add final state to log.
state = self.get_scene_object_locs_and_states()
self.current_episode.final_state = Initialization(
time_start=time.time() - self.start_time,
agents=state["agents"],
objects=state["objects"],
custom_object_metadata=self.__custom_object_metadata,
)
# Save log file
super().save(file_name=file_name)
def done(self, file_name=None):
"""
Shut down the simulator and save the session with final simulator state; Should be called at end of collection/
replay of an episode
:param file_name: If file_name is not None, the simulator session is saved in the same format as original games
"""
# Add final state to log.
state = self.get_scene_object_locs_and_states()
self.current_episode.final_state = Initialization(
time_start=time.time() - self.start_time,
agents=state["agents"],
objects=state["objects"],
custom_object_metadata=self.__custom_object_metadata,
)
# End AI2-THOR Unity process
self.controller.stop()
self.controller = None
# Save log file and change current_episode metadata in the base
super().done(file_name=file_name)
def __argmin(self, lst):
"""
Return the index of the least element in l
"""
return lst.index(min(lst))
    def __get_nearest_object_face_to_position(self, obj, pos):
        """
        Examine the AI2-THOR property 'axisAlignedBoundingBox'['cornerPoints'] and return the pose closest to target
        pose specified in param pos
        :param obj: the object to examine the faces of
        :param pos: the target position to get near
        :return: dict with keys 'x', 'y', 'z'; each axis value comes from the bounding-box corner whose
            coordinate on that axis is closest to pos, so the result may mix coordinates from different corners
        """
        coords = ["x", "y", "z"]
        if obj["pickupable"]:
            # For pickupable objects we don't actually need to examine corner points and doing so sometimes causes
            # errors with clones
            return obj["position"]
        # For each axis c: find the corner point whose c-coordinate is nearest to pos[c]
        # (corner points are [x, y, z] triples, indexed via coords.index(c)),
        # then take that corner's c-coordinate. Axes are chosen independently.
        xzy_obj_face = {
            c: obj["axisAlignedBoundingBox"]["cornerPoints"][
                self.__argmin(
                    [
                        np.abs(obj["axisAlignedBoundingBox"]["cornerPoints"][pidx][coords.index(c)] - pos[c])
                        for pidx in range(len(obj["axisAlignedBoundingBox"]["cornerPoints"]))
                    ]
                )
            ][coords.index(c)]
            for c in coords
        }
        return xzy_obj_face
    def __aim_camera_at_object(self, obj, camera_id):
        """
        Position camera specified by camera_id such that object obj is visible; Used to set target object view for
        TEACh data collection interface
        :param obj: Object to face - an element of the output of get_objects()
        :param camera_id: A valid camera ID
        :return: (nav_point_idx, face_obj_rot) - index of the chosen navigation point and the xz facing
            direction returned by the navigation-graph helper
        """
        # Pick the navigation point to stand at and the xz direction that faces the object.
        nav_point_idx = self.__get_nav_graph_point(obj["position"]["x"], obj["position"]["z"])
        face_obj_rot = self.__get_nav_graph_rot(
            self.navigation_points[nav_point_idx]["x"],
            self.navigation_points[nav_point_idx]["z"],
            obj["position"]["x"],
            obj["position"]["z"],
        )
        # Calculate the angle at which to look at the object to center it.
        # We look from the head height of the agent [https://github.com/allenai/ai2thor/issues/266]
        # Head gaze is the hypotenuse of a right triangle whose legs are the xz (floor) distance to the obj and the
        # difference in gaze versus object height.
        # To get the object 'face' instead of center (which could be out of frame, especially for large objects like
        # drawers and cabinets), we decide the x,z,y position of the obj as the min distance to its corners.
        xzy_obj_face = self.__get_nearest_object_face_to_position(obj, self.navigation_points[nav_point_idx])
        xz_dist = np.sqrt(
            np.power(xzy_obj_face["x"] - self.navigation_points[nav_point_idx]["x"], 2)
            + np.power(xzy_obj_face["z"] - self.navigation_points[nav_point_idx]["z"], 2)
        )
        # 1.8 is used as the camera/head height here; pitch is 0 when standing directly above/below the face.
        y_diff = 1.8 - xzy_obj_face["y"]
        theta = np.arctan(y_diff / xz_dist) * 180.0 / np.pi if not np.isclose(xz_dist, 0) else 0
        action = dict(
            action="UpdateThirdPartyCamera",
            thirdPartyCameraId=camera_id,
            rotation=dict(x=theta, y=self.__get_y_rot_from_xz(face_obj_rot[0], face_obj_rot[1]), z=0),
            position=dict(
                x=self.navigation_points[nav_point_idx]["x"], y=1.8, z=self.navigation_points[nav_point_idx]["z"]
            ),
        )
        if debug_print_all_sim_steps:
            logger.info("step %s", action)
        self.controller.step(action)
        return nav_point_idx, face_obj_rot
    def teleport_agent_to_face_object(self, obj, agent_id, force_face=None, get_closest=True):
        """
        Move agent to a position where object obj is visible
        :param obj: Object to face - an element of the output of get_objects()
        :param agent_id: 0 for Commander and 1 for Driver/ Follower
        :param force_face: Specify a particular target rotation
        :param get_closest: If True the agent is always places at closest position; if false, nucleus sampling within
        a distance radius around the target object is used
        :return: (success, nav_point_idx, face_obj_rot) - whether the teleport succeeded, the chosen
            navigation point index (or None if none was found), and the xz facing direction
        """
        # Get point and facing direction.
        tried_points = set()
        face_obj_rot = nav_point_idx = None
        # Keep sampling navigation points until one yields the required facing
        # direction (or, when force_face is None, until the first valid point).
        while face_obj_rot is None or (force_face is not None and face_obj_rot != force_face):
            nav_point_idx = self.__get_nav_graph_point(
                obj["position"]["x"], obj["position"]["z"], exclude_points=tried_points, get_closest=get_closest
            )
            if nav_point_idx is None:
                # Exhausted candidate points without success.
                return False, None, None
            face_obj_rot = self.__get_nav_graph_rot(
                self.navigation_points[nav_point_idx]["x"],
                self.navigation_points[nav_point_idx]["z"],
                obj["position"]["x"],
                obj["position"]["z"],
            )
            tried_points.add(nav_point_idx)
            if force_face is not None and force_face != face_obj_rot:
                return False, nav_point_idx, face_obj_rot
        # Teleport
        agent_pose = (
            self.controller.last_event.events[agent_id].metadata["agent"]
            if self.commander_embodied
            else self.controller.last_event.metadata["agent"]
        )
        # Keep the agent's current x/z rotation components and height; only yaw and
        # floor position change, with the camera horizon reset to level.
        action = dict(
            action="Teleport",
            agentId=agent_id,
            rotation=dict(
                x=agent_pose["rotation"]["x"],
                y=self.__get_y_rot_from_xz(face_obj_rot[0], face_obj_rot[1]),
                z=agent_pose["rotation"]["z"],
            ),
            position=dict(
                x=self.navigation_points[nav_point_idx]["x"],
                y=agent_pose["position"]["y"],
                z=self.navigation_points[nav_point_idx]["z"],
            ),
            horizon=0,
        )
        if debug_print_all_sim_steps:
            logger.info("step %s", action)
        event = self.controller.step(action)
        if not event.metadata["lastActionSuccess"]:
            return False, nav_point_idx, face_obj_rot
        return True, nav_point_idx, face_obj_rot
def obj_dist_to_nearest_agent(self, obj):
"""
Return Euclidean distance between a given object and the nearest agent in the sim.
"""
if self.commander_embodied:
# For immobile commander, only check what object is closest to driver.
events = [self.controller.last_event.events[0]]
else:
events = [self.controller.last_event]
ds = [
np.linalg.norm(
[
obj["position"]["x"] - e.metadata["agent"]["position"]["x"],
obj["position"]["y"] - e.metadata["agent"]["position"]["y"],
obj["position"]["z"] - e.metadata["agent"]["position"]["z"],
]
)
for e in events
]
return min(ds)
def __agent_dist_to_agent(self, agent_id_a, agent_id_b):
"""
Return Euclidean distance between two agents in the sim.
"""
a_agent_pos = self.controller.last_event.events[agent_id_a].metadata["agent"]["position"]
b_agent_pos = self.controller.last_event.events[agent_id_b].metadata["agent"]["position"]
return np.linalg.norm([a_agent_pos[c] - b_agent_pos[c] for c in ["x", "y", "z"]])
def check_episode_preconditions(self, task):
"""
Check whether the current simulator state is one in which the input task can be completed
:param task: Instance of Task_THOR; task to be checked
"""
return task.check_episode_preconditions(self, self.get_objects(self.controller.last_event))
def check_episode_progress(self, task):
"""
Check completion status of input task given the current simulator state
:param task: Instance of Task_THOR; task to be checked
:return: (task_desc:str, success:bool, subgoal_status:list)
Each element of subgoal_status is a dict with keys 'success':bool, 'description':str and 'steps':list
Each element of subgoal_status[idx]['steps'] is a dict with keys 'success':bool, 'objectId':str,
'objectType':str, 'desc':str
"""
progress_check_output = task.check_episode_progress(self.get_objects(self.controller.last_event), self)
return (
progress_check_output["description"],
progress_check_output["success"],
progress_check_output["subgoals"],
progress_check_output["goal_conditions_total"],
progress_check_output["goal_conditions_satisfied"],
)
def __get_nearest_object_matching_search_str(self, query, exclude_inventory=False):
"""
Obtain the nearest object to the commander OR driver matching the given search string.
:param query: the search string to check against AI2-THOR objectType of objects (uses fuzzy matching)
:param exclude_inventory: if True, don't include inventory objects as candidates (e.g., nothing held will return)
"""
closest_obj = closest_str_ratio = closet_obj_d_to_agent = None
if self.commander_embodied:
le = self.controller.last_event.events[0]
inv_objs = self.get_inventory_objects(self.controller.last_event.events[0])
inv_objs.extend(self.get_inventory_objects(self.controller.last_event.events[1]))
else:
le = self.controller.last_event
inv_objs = self.get_inventory_objects(le)
inv_obj_ids = [o["objectId"] for o in inv_objs]
for obj in le.metadata["objects"]:
if exclude_inventory and obj["objectId"] in inv_obj_ids:
logger.info("%s in inv; skipping" % obj["objectId"])
continue
str_ratio = fuzz.ratio(obj["objectType"], query)
if (
str_ratio > 0
and
# Closer string match or equal string match but closer to agent
(
closest_obj is None
or str_ratio > closest_str_ratio
or
# Physically closer to closest agent.
(str_ratio == closest_str_ratio and self.obj_dist_to_nearest_agent(obj) < closet_obj_d_to_agent)
)
):
closest_obj = obj
closest_str_ratio = str_ratio
closet_obj_d_to_agent = self.obj_dist_to_nearest_agent(obj)
return closest_obj
def __get_random_object_matching_search_str(self, query, exclude_inventory=False):
"""
Obtain a random object to the commander OR driver matching the given search string.
:param query: the search string to check against AI2-THOR objectType of objects (uses fuzzy matching)
:param exclude_inventory: if True, don't include inventory objects as candidates (e.g., nothing held will return)
"""
if self.commander_embodied:
le = self.controller.last_event.events[0]
inv_objs = self.get_inventory_objects(self.controller.last_event.events[0])
inv_objs.extend(self.get_inventory_objects(self.controller.last_event.events[1]))
else:
le = self.controller.last_event
inv_objs = self.get_inventory_objects(le)
inv_obj_ids = [o["objectId"] for o in inv_objs]
candidate_objects = self.get_objects(le)
if exclude_inventory:
candidate_objects = [obj for obj in candidate_objects if obj["objectId"] not in inv_obj_ids]
str_ratios = [fuzz.ratio(obj["objectType"], query) for obj in candidate_objects]
max_ratio = np.max(str_ratios)
max_ratio_idxs = [idx for idx in range(len(str_ratios)) if np.isclose(max_ratio, str_ratios[idx])]
closest_match_objects = [candidate_objects[idx] for idx in max_ratio_idxs]
return np.random.choice(closest_match_objects)
def get_target_object_seg_mask(self, oid):
"""
Get a numpy array with 1s on oid segmentation mask and 0s elsewhere.
:param oid: ID of object to be highlighted in the mask
"""
r = self.get_hotspots(
agent_id=None, camera_id=self.object_target_camera_idx, object_id=oid, return_full_seg_mask=True
)
return r
    def set_target_object_view(self, oid, search):
        """
        Move target object third party camera to look at specified objectId and returns associated hotspots
        :param oid: ID of object to be shown or None
        :param search: if oid is None, search string to use for fuzzy matching of object type
        :return: False when no matching object can be found; otherwise a dict for the UI with keys
                 success/oid/shown_oid/view_pos_norm/view_rot_norm/pos_norm plus view_* hotspot data.
        """
        # Exactly one of oid / search may be provided.
        assert oid is None or search is None
        le = self.controller.last_event.events[0] if self.commander_embodied else self.controller.last_event
        if oid is None:  # need to choose an oid via search first
            if self.randomize_object_search:
                obj = self.__get_random_object_matching_search_str(search, exclude_inventory=True)
            else:
                obj = self.__get_nearest_object_matching_search_str(search, exclude_inventory=True)
            if obj is None:
                return False
        else:
            obj = self.__get_object_by_id(le.metadata["objects"], oid)
            if obj is False:
                return False
        # First, teleport the camera to the nearest navigable point to the object of interest.
        if self.navigation_graph is None:
            self.__generate_navigation_graph()
        nav_point_idx, face_obj_rot = self.__aim_camera_at_object(obj, self.object_target_camera_idx)
        # Get hotspots of the object from this vantage point.
        shown_obj_id = obj["objectId"]
        enc_obj_hotspots = self.get_hotspots(
            agent_id=None, camera_id=self.object_target_camera_idx, object_id=obj["objectId"]
        )
        parent_receptacles = self.get_parent_receptacles(obj, self.get_objects(self.controller.last_event))
        # Back off to container if object is fully occluded.
        if len(enc_obj_hotspots["hotspots"]) == 0:
            # Fallback 1: try each known parent receptacle (e.g., a fridge hiding the object).
            if parent_receptacles is not None and len(parent_receptacles) > 0:
                logger.warning('no hotspots for obj "%s", so checking parentReceptacles' % obj["objectId"])
                for receptacle_obj in parent_receptacles:
                    if "Floor" in receptacle_obj:  # ignore the floor as a parent since hotspotting it isn't helpful
                        continue
                    logger.info("... trying %s" % receptacle_obj)
                    shown_obj_id = receptacle_obj
                    enc_obj_hotspots = self.get_hotspots(
                        agent_id=None, camera_id=self.object_target_camera_idx, object_id=receptacle_obj
                    )
                    if len(enc_obj_hotspots["hotspots"]) == 0:
                        # Couldn't see receptacle, so recenter camera and get a new frame
                        nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                            le.get_object(receptacle_obj), self.object_target_camera_idx
                        )
                        enc_obj_hotspots = self.get_hotspots(
                            agent_id=None, camera_id=self.object_target_camera_idx, object_id=receptacle_obj
                        )
                        if len(enc_obj_hotspots["hotspots"]) == 0:
                            # Put camera back on target object.
                            nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                                obj, self.object_target_camera_idx
                            )
                    if len(enc_obj_hotspots["hotspots"]) > 0:
                        break  # got a hotspot view for this parent
            # Fallback 2: probe up to 5 spatially-nearest receptacles around the object.
            if len(enc_obj_hotspots["hotspots"]) == 0:
                logger.warning(
                    'no hotspots for obj "%s", and no parentReceptacles hotspots,' % obj["objectId"]
                    + "so getting hotspots for nearest receptacle..."
                )
                nn_objs = [obj["objectId"]]
                while len(nn_objs) < 6:  # try limited number of nearby objects
                    nn_obj = self.__get_object_by_position(
                        le.metadata["objects"], obj["position"], ignore_object_ids=nn_objs
                    )
                    logger.info("... trying %s" % nn_obj["objectId"])
                    if nn_obj["receptacle"]:
                        if "Floor" not in nn_obj["objectId"]:  # ignore the floor as a parent
                            shown_obj_id = nn_obj["objectId"]
                            enc_obj_hotspots = self.get_hotspots(
                                agent_id=None, camera_id=self.object_target_camera_idx, object_id=nn_obj["objectId"]
                            )
                            if len(enc_obj_hotspots["hotspots"]) == 0:
                                # Couldn't see receptacle, so recenter camera and get a new frame
                                nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                                    nn_obj, self.object_target_camera_idx
                                )
                                enc_obj_hotspots = self.get_hotspots(
                                    agent_id=None, camera_id=self.object_target_camera_idx, object_id=nn_obj["objectId"]
                                )
                                if len(enc_obj_hotspots["hotspots"]) == 0:
                                    # Put camera back on target object.
                                    nav_point_idx, face_obj_rot = self.__aim_camera_at_object(
                                        obj, self.object_target_camera_idx
                                    )
                            if len(enc_obj_hotspots["hotspots"]) > 0:
                                break  # got a hotspot view for this candidate receptacle
                    nn_objs.append(nn_obj["objectId"])
        # If no receptacle hotspots can be found at all, just return the frame looking "at" the object.
        if len(enc_obj_hotspots["hotspots"]) == 0:
            logger.warning("no hotspots for parentReceptacles %s" % parent_receptacles)
            shown_obj_id = ""
        # Prep metadata to be sent up for UI.
        obj_view_pos_norm = self.__get_click_normalized_position_from_xz(
            self.navigation_points[nav_point_idx]["x"], self.navigation_points[nav_point_idx]["z"]
        )
        obj_data = {
            "success": True,
            "oid": obj["objectId"],  # the object matching the query
            "shown_oid": shown_obj_id,  # The object whose hotspots are shown
            "view_pos_norm": obj_view_pos_norm,  # Location of the viewing camera on the topdown map
            "view_rot_norm": [face_obj_rot[0], -face_obj_rot[1]],  # flip y from thor coords
            "pos_norm": self.__get_click_normalized_position_from_xz(obj["position"]["x"], obj["position"]["z"]),
        }
        obj_data.update({"view_%s" % k: enc_obj_hotspots[k] for k in enc_obj_hotspots})  # hotspot array and width data
        return obj_data
    def encode_image(self, img):
        # Pure pass-through to the parent implementation; adds no behavior.
        # NOTE(review): presumably kept so this class exposes encode_image explicitly;
        # could be deleted unless it exists for interface clarity — confirm.
        return super().encode_image(img)
def get_parent_receptacles(self, obj, objects):
"""
Recursively traces custom properties that track where objects were placed to identify receptacles of an object
when AI2-THOR's property parentReceptacles fails
:param obj: The object whose receptacles need to be identified
:param objects: Output of get_objects()
"""
if "parentReceptacles" in obj and obj["parentReceptacles"] is not None:
return obj["parentReceptacles"]
elif "simbotLastParentReceptacle" in obj:
immediate_parent_receptacle = obj["simbotLastParentReceptacle"]
if immediate_parent_receptacle is not None and immediate_parent_receptacle != obj["objectId"]:
# Second clause is to prevent infinite recursion in weird corner cases that should ideally never happen
parent_receptacles = [immediate_parent_receptacle]
immediate_parent_receptacle_obj = self.__get_object_by_id(objects, immediate_parent_receptacle)
if type(immediate_parent_receptacle_obj) == dict:
further_parent_receptacles = self.get_parent_receptacles(immediate_parent_receptacle_obj, objects)
if further_parent_receptacles is not None:
parent_receptacles += further_parent_receptacles
return parent_receptacles
return None
def success(self):
"""
When an episode ends, the parent function of done() will call this to see whether the episode can stop.
"""
return True # with the THOR backend, we can just say go ahead and stop
def __get_agent_poses(self):
"""
Return current poses of agents
"""
if self.controller is None:
return None
if self.commander_embodied:
cmd_xy = self.__get_agent_click_normalized_position(agent_id=0)
cmd_r = self.__get_agent_click_rotation(agent_id=0)
dri_xy = self.__get_agent_click_normalized_position(agent_id=1)
dri_r = self.__get_agent_click_rotation(agent_id=1)
return [(cmd_xy[0], cmd_xy[1], cmd_r[0], cmd_r[1]), (dri_xy[0], dri_xy[1], dri_r[0], dri_r[1])]
else:
e = self.controller.last_event
cmd_xy = self.__get_agent_click_normalized_position(agent_metadata=e.metadata["thirdPartyCameras"][0])
cmd_r = self.__get_agent_click_rotation(agent_metadata=e.metadata["thirdPartyCameras"][0])
dri_xy = self.__get_agent_click_normalized_position()
dri_r = self.__get_agent_click_rotation()
return [(cmd_xy[0], cmd_xy[1], cmd_r[0], cmd_r[1]), (dri_xy[0], dri_xy[1], dri_r[0], dri_r[1])]
def __get_nav_graph_point(self, thor_x, thor_z, exclude_points=None, get_closest=True):
"""
Get the index in the navigation graph nearest to the given x,z coord in AI2-THOR coordinates
:param thor_x: x coordinate on AI2-THOR floor plan
:param thor_z: z coordinate on AI2-THOR floor plan
:param exclude_points: Any navigation graph points that cannot be used
:param get_closest: If false, instead of returning closest navigation graph point, do nucleus sampling around
the coordinate; if True return the closest navigation graph point
"""
if self.navigation_graph is None:
self.__generate_navigation_graph()
t_point = nearest_t_d = None
distances = []
for idx in range(len(self.navigation_points)):
if exclude_points is not None and idx in exclude_points:
distances.append(float("inf"))
continue
d = np.abs(self.navigation_points[idx]["x"] - thor_x) + np.abs(self.navigation_points[idx]["z"] - thor_z)
distances.append(d)
if t_point is None or d < nearest_t_d:
t_point = idx
nearest_t_d = d
if not get_closest: # rather than returning closest point, do nucleus sampling on softmax of 1/d
scores = [np.exp(1.0 / d) for d in distances]
dps = {idx: scores[idx] / sum(scores) for idx in range(len(scores))}
dnps = {}
nucleus_density = 0.1
nucleus_sum = 0
for k, v in sorted(dps.items(), key=lambda item: item[1], reverse=True):
dnps[k] = v if nucleus_sum < nucleus_density or len(dnps) == 0 else 0
nucleus_sum += v
nps = [dnps[idx] for idx in range(len(scores))]
nps = [p / sum(nps) for p in nps]
t_point = np.random.choice(list(range(len(self.navigation_points))), p=nps)
return t_point
def __get_nav_graph_rot(self, thor_x, thor_z, thor_facing_x, thor_facing_z):
"""
Get the cardinal direction to rotate to, to be facing (thor_facing_x, thor_facing_x=z) when standing at
(thor_x, thor_z)
:param thor_x: x Coordinate on Ai2-THOR floor plan where agent is standing
:param thor_z: z Coordinate on Ai2-THOR floor plan where agent is standing
:param thor_facing_x: x Coordinate on Ai2-THOR floor plan where agent is desired to face
:param thor_facing_z: z Coordinate on Ai2-THOR floor plan where agent is desired to face
"""
# Determine target rotation.
if np.abs(thor_x - thor_facing_x) > np.abs(thor_z - thor_facing_z): # Difference is greater in the x direction.
if thor_x - thor_facing_x > 0: # Destination to the x left
t_rot = (-1, 0)
else:
t_rot = (1, 0)
else: # Difference is greater in the z direction
if thor_z - thor_facing_z > 0: # Destination to the z above
t_rot = (0, -1)
else:
t_rot = (0, 1)
return t_rot
def __generate_navigation_graph(self):
"""
Generate navigation graph: We construct a directed graph with nodes representing agent
position and rotation. For every occupiable grid point on the map, we create four nodes for each orientation.
Orientation nodes at a single occupiable point are connected with directed edges for turns.
Occupiable positions are connected with directed edges that preserve orientation.
"""
if debug_print_all_sim_steps:
logger.info("step %s", "GetReachablePositions")
event = self.controller.step(action="GetReachablePositions")
p = event.metadata["actionReturn"]
ng = nx.DiGraph()
rotations = [[1, 0], [-1, 0], [0, 1], [0, -1]]
for idx in range(len(p)):
for rx, rz in rotations:
ng.add_node((idx, rx, rz))
for idx in range(len(p)):
for irx, irz in rotations:
for jrx, jrz in rotations:
if irx + jrx == 0 or irz + jrz == 0:
continue # antipodal or identical
ng.add_edge((idx, irx, irz), (idx, jrx, jrz))
for jdx in range(len(p)):
if idx == jdx:
continue
rx = rz = None
if np.isclose(p[idx]["z"] - p[jdx]["z"], 0):
if np.isclose(p[idx]["x"] - p[jdx]["x"], self.grid_size):
rx = -1
rz = 0
elif np.isclose(p[idx]["x"] - p[jdx]["x"], -self.grid_size):
rx = 1
rz = 0
elif np.isclose(p[idx]["x"] - p[jdx]["x"], 0):
if np.isclose(p[idx]["z"] - p[jdx]["z"], self.grid_size):
rx = 0
rz = -1
elif | np.isclose(p[idx]["z"] - p[jdx]["z"], -self.grid_size) | numpy.isclose |
# HaloFeedback
import warnings
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simpson
from scipy.special import ellipeinc, ellipkinc, ellipe, betainc
from scipy.special import gamma as Gamma
from scipy.special import beta as Beta
# ------------------
G_N = 4.300905557082141e-3 # [(km/s)^2 pc/M_sun] [Legacy: 4.3021937e-3]
c = 299792.458 # [km/s] [Legacy: 2.9979e5]
# Conversion factors
pc_to_km = 3.085677581491367e13 # [km] [Legacy: 3.085677581e13]
# Numerical parameters
N_GRID = 10000 # Number of grid points in the specific energy.
N_KICK = 50 # Number of points to use for integration over Delta-epsilon. [Legacy: 50]
float_2eps = 2.0 * np.finfo(float).eps
# ------------------
def ellipeinc_alt(phi, m):
    """An alternative incomplete elliptic integral of the second kind that is valid for m > 1.

    Applies the reciprocal-modulus transformation: the amplitude is remapped through
    beta = arcsin(sqrt(m) * sin(phi)) (the argument is clipped into [0, 1] before arcsin),
    and the result is expressed via ellipeinc and ellipkinc evaluated at parameter 1/m.
    """
    sqrt_m = np.sqrt(m)
    beta = np.arcsin(np.clip(sqrt_m * np.sin(phi), 0, 1))
    term_e = sqrt_m * ellipeinc(beta, 1 / m)
    term_k = ((1 - m) / sqrt_m) * ellipkinc(beta, 1 / m)
    return term_e + term_k
class DistributionFunction(ABC):
"""
Base class for phase space distribution of a DM spike surrounding a black
hole with an orbiting body. Child classes must implement the following:
Methods
- rho_init(): initial density function
- f_init() initial phase-space distribution function
Attributes
- r_sp: DM halo extent [pc]. Used for making grids for the calculation.
- IDstr_model: ID string used for file names.
"""
    def __init__(self, m1: float = 1e3, m2: float = 1.0, mDM: float = 0):
        """Set up the radial/energy grids and the initial phase-space distribution.

        Parameters:
            - m1: central black hole mass [M_sun]
            - m2: orbiting companion mass [M_sun]
            - mDM: DM mass [M_sun]; used additively with m2 in b_90()

        NOTE(review): reads self.r_sp, which is NOT set here — concrete subclasses must
        define r_sp before this runs (see class docstring); confirm in each subclass.
        """
        self.m1 = m1  # [M_sun]
        self.m2 = m2  # [M_sun]
        self.mDM = mDM  # [M_sun]
        # ISCO-scale radius [pc]: 6 G M / c^2.
        self.r_isco = 6.0 * G_N * m1 / c ** 2
        # Initialise grid of r, eps and f(eps) and append an extra loose grid far away.
        self.r_grid = np.geomspace(self.r_isco, 1e5 * self.r_isco, int(0.9 *N_GRID))
        self.r_grid = np.append(
            self.r_grid, np.geomspace(1.01 * self.r_grid[-1], 1e3 * self.r_sp, int(0.1*N_GRID))
        )
        self.r_grid = np.sort(self.r_grid)
        # Specific energy grid eps = psi(r): decreases as r grows (psi ~ 1/r).
        self.eps_grid = self.psi(self.r_grid)
        self.f_eps = self.f_init(self.eps_grid)
        # Density of states
        self.DoS = (
            np.sqrt(2) * (np.pi * G_N * self.m1) ** 3 * self.eps_grid ** (-5/2)
        )
        # Define a string which specifies the model parameters
        # and numerical parameters (for use in file names etc.)
        # NOTE(review): np.log(np.sqrt(m2/m1)) is negative whenever m2 < m1 — check the
        # intended Coulomb-log convention (sqrt(m1/m2)?) before trusting these file names.
        self.IDstr_num = "lnLambda=%.1f" % (np.log(np.sqrt(m2/m1)),)
    @abstractmethod
    def rho_init(self, r):
        """ The initial dark matter density [M_sun/pc^3] of the system at distance r from the
        halo center.

        Concrete subclasses must implement this; it defines the initial halo profile.

        Parameters:
            - r : distance [pc] from center of spike.
        """
        pass
    @abstractmethod
    def f_init(self, eps):
        """ The initial phase-space distribution function at energy eps.

        Concrete subclasses must implement this; __init__ uses it to seed self.f_eps
        on the energy grid.

        Parameters
            - eps : float or np.array Energy per unit mass in (km/s)^2
        """
        pass
def plotDF(self):
""" Plots the initial and current distribution function of the spike. """
plt.figure()
plt.loglog(self.eps_grid, self.f_init(self.eps_grid), "k--", label = "Initial DF")
plt.loglog(self.eps_grid, self.f_eps)
plt.ylabel(r"$f(\mathcal{E})$ [$M_\odot$ pc$^{-3}$ (km/s)$^{-3}$]")
plt.xlabel(r"$\mathcal{E} = \Psi(r) - \frac{1}{2}v^2$ [(km/s)$^2$]")
plt.legend()
plt.show()
return plt.gca()
def psi(self, r: float) -> float:
""" The gravitational potential [km^2/s^2] at distance r [pc]."""
return G_N *self.m1 /r # [km^2/s^2]
def v_max(self, r: float) -> float:
""" The maximum velocity [km/s] allowed for bound orbits in the system at position r [pc]."""
return np.sqrt(2 * self.psi(r)) # [km/s]
def rho(self, r: float, v_cut: float = -1) -> float:
""" Returns the local density [M_sun/pc^3] of the dark matter particles at position
r [pc] from the halo center, that move slower than v_cut [km/s].
Parameters:
- r: The distance from the dark matter halo center.
- v_cut : maximum speed to include in density calculation
(defaults to v_max if not specified)
"""
if v_cut < 0: v_cut = self.v_max(r)
v_cut = np.clip(v_cut, 0, self.v_max(r))
vlist = np.sqrt(np.linspace(0, v_cut ** 2, 20000))
# Interpolate the integrand onto the new array vlist.
flist = np.interp(self.psi(r) - 0.5 * vlist ** 2,
self.eps_grid[::-1], self.f_eps[::-1],
left = 0, right = 0,
)
integ = vlist ** 2 * flist
return 4 * np.pi *simpson(integ, vlist) # [M_sun/pc^3]
    def averageVelocity(self, r: float) -> float:
        """ Returns the local average velocity [km/s] <u> from the velocity distribution of the
        dark matter particles at position r [pc] from the halo center.

        NOTE(review): the outer np.sqrt makes this sqrt(<u>) rather than <u>
        (compare averageSquaredVelocity, whose sqrt yields the RMS speed) —
        confirm the intended definition before relying on absolute values.
        """
        v_cut = self.v_max(r)
        # Interpolate the integrand onto the new array vlist.
        v_cut = np.clip(v_cut, 0, self.v_max(r))
        vlist = np.sqrt(np.linspace(0, v_cut**2, 250))
        flist = np.interp(self.psi(r) -0.5 *vlist **2,
            self.eps_grid[::-1], self.f_eps[::-1],
            left = 0, right = 0,
        )
        # Velocity-space weighting: <u> = ∫ u^3 f du / ∫ u^2 f du (then sqrt, see note above).
        integ = vlist ** 3 * flist
        # NOTE(review): np.trapz was removed in NumPy 2.0 (np.trapezoid is the replacement).
        return np.sqrt(np.trapz(integ, vlist) / np.trapz(vlist ** 2 * flist, vlist))  # [km/s]
    def averageSquaredVelocity(self, r: float) -> float:
        """ Returns the local root-mean-squared velocity [km/s] sqrt(<u^2>) from the velocity
        distribution of the dark matter particles at position r [pc] from the halo center.

        Despite the name, this returns sqrt(<u^2>) (an RMS speed), not <u^2> itself:
        <u^2> = ∫ u^4 f du / ∫ u^2 f du, and the outer sqrt is applied below.
        """
        v_cut = self.v_max(r)
        # Interpolate the integrand onto the new array vlist.
        v_cut = np.clip(v_cut, 0, self.v_max(r))
        vlist = np.sqrt(np.linspace(0, v_cut**2, 250))
        flist = np.interp(self.psi(r) -0.5 *vlist **2,
            self.eps_grid[::-1], self.f_eps[::-1],
            left = 0, right = 0,
        )
        integ = vlist ** 4 * flist
        # NOTE(review): np.trapz was removed in NumPy 2.0 (np.trapezoid is the replacement).
        return np.sqrt(np.trapz(integ, vlist) / np.trapz(vlist ** 2 * flist, vlist))  # [km/s]
def velocityDispersion(self, r: float) -> float:
""" Returns the local velocity dispersion [km/s] from the velocity distribution of the dark matter
particles at position r [pc] from the halo center.
"""
u2 = self.averageSquaredVelocity(r)
u = self.averageSquaredVelocity(r)
return np.sqrt(u2 -u**2) # [km/s]
def m(self) -> float:
""" The total mass [M_sun] of the binary system. """
return self.m1 +self.m2 # [M_sun]
def mu(self) -> float:
""" The reduced mass [M_sun] of the binary system. """
return self.m1 *self.m2 /self.m() # [M_sun]
def totalMass(self) -> float:
""" The total mass of dark matter particles in the halo. """
return simpson(-self.P_eps(), self.eps_grid)
def totalEnergy(self) -> float:
""" The total energy of the dark matter halo. """
return simpson(-self.P_eps() * self.eps_grid, self.eps_grid)
def b_90(self, r2: float, Delta_u: float) -> float:
""" The impact parameter [pc] at which dark matter particles are deflected at a 90 degree angle.
Delta_u relative velocity of the orbiting body and dark matter particles, usually set at u_orb
of the companion object m2.
"""
return G_N *(self.m2 +self.mDM) / (Delta_u ** 2) # [pc]
def b_min(self, r2: float, v_orb: float) -> float:
""" The minimum impact parameter [pc] is the radius of the companion m2. """
return self.R/pc_to_km if self.R != -1 else 6.0 * G_N * self.m2/ c ** 2 # [pc]
def b_max(self, r2: float, v_orb: float = -1) -> float:
""" The maximum impact parameter [pc] as calculated from gravitational force equivalance O(sqrt(q)).
Parameters:
- r2 is the separation [pc] of the two components.
- v_orb is the instant velocity [km/s] of the orbiting body. If not specified, defaults to circular orbital velocity.
"""
if v_orb == -1: v_orb = np.sqrt(G_N * (self.m1 + self.m2) / r2) # [km/s]
return np.sqrt(self.m2/self.m1) *r2 # [pc]
def Lambda(self, r2: float, v_orb: float = -1) -> float:
""" The coulomb logarithm of the dynamical friction force induced by the dark matter particles.
Parameters:
- r2 is the separation [pc] of the two components.
- v_orb is the instant velocity [km/s] of the orbiting body. If not specified, defaults to circular orbital velocity.
"""
if v_orb == -1: v_orb = np.sqrt(G_N * (self.m1 + self.m2) / r2) # [km/s]
b90 = self.b_90(r2, v_orb) # [pc]
return np.sqrt((self.b_max(r2, v_orb)**2 +b90**2)/(self.b_min(r2, v_orb)**2 +b90**2))
def eps_min(self, r2: float, v_orb: float) -> float:
""" The minimum energy for the average delta_eps calculation in calc_delta_eps()."""
return 2 * v_orb ** 2 / (1 + self.b_max(r2, v_orb) ** 2 / self.b_90(r2, v_orb) ** 2)
def eps_max(self, r2: float, v_orb: float) -> float:
return 2 * v_orb ** 2 / (1 + self.b_min(r2, v_orb) ** 2 / self.b_90(r2, v_orb) ** 2)
    def df(self, r2: float, v_orb: float, v_cut: float = -1) -> np.array:
        """The change of the distribution function f(eps) during an orbit.

        Combines the particles scattered out of each energy bin (df_minus) with those
        scattered into it (df_plus), each evaluated with N_KICK integration points.

        Parameters:
            - r2 is the radial position [pc] of the perturbing body.
            - v_orb is the orbital velocity [km/s] of the perturbing body.
            - v_cut (optional), only scatter with particles slower than v_cut [km/s]
              defaults to v_max(r) (i.e. all particles).
        """
        df_minus = self.df_minus(r2, v_orb, v_cut, N_KICK)
        df_plus = self.df_plus(r2, v_orb, v_cut, N_KICK)
        # TODO: What is this meant for?
        # Both normalisations are currently inert (fixed at 1); the commented-out
        # expressions suggest an intended particle-number rebalancing between the
        # plus and minus channels.
        N_plus = 1  # np.trapz(self.DoS*f_plus, self.eps_grid)
        N_minus = 1  # np.trapz(-self.DoS*f_minus, self.eps_grid)
        return df_minus + df_plus *(N_minus/N_plus)
def dfdt(self, r2: float, v_orb: float, v_cut: float = -1) -> np.array:
"""Time derivative of the distribution function f(eps).
Parameters:
- r2 is the radial position [pc] of the perturbing body.
- v_orb is the orbital velocity [km/s] of the perturbing body.
- v_cut (optional), only scatter with particles slower than v_cut [km/s]
defaults to v_max(r) (i.e. all particles).
"""
T_orb = self.T_orb(r2) # [s]
return self.df(r2, v_orb, v_cut) /T_orb
    def delta_f(self, r0: float, v_orb: float, dt: float, v_cut: float = -1) -> np.array:
        """[Deprecated] This shouldn't be used in new applications. TODO: Remove?

        Change in f over a time-step dt where it is automatically
        adjusted to prevent f_eps from becoming negative.

        NOTE(review): this calls self.dfdt_minus / self.dfdt_plus, which are not defined
        in the portion of the class visible here (only df_minus / df_plus are) — confirm
        they exist elsewhere before resurrecting this method.

        Parameters:
            - r0 is the radial position [pc] of the perturbing body.
            - v_orb is the orbital velocity [km/s] of the perturbing body.
            - dt: time-step [s]
            - v_cut (optional), only scatter with particles slower than v_cut [km/s]
              defaults to v_max(r) (i.e. all particles).
        """
        f_minus = self.dfdt_minus(r0, v_orb, v_cut, N_KICK) * dt
        # Don't remove more particles than there are particles...
        # correction scales the "plus" channel by the fraction of the removal actually applied.
        correction = np.clip(self.f_eps / (-f_minus + 1e-50), 0, 1)
        f_minus = np.clip(f_minus, -self.f_eps, 0)
        f_plus = self.dfdt_plus(r0, v_orb, v_cut, N_KICK, correction) * dt
        return f_minus + f_plus
def P_delta_eps(self, r: float, v: float, delta_eps: float) -> float:
""" Calcuate PDF for delta_eps. """
norm = self.b_90(r, v) ** 2 / (self.b_max(r, v) ** 2 - self.b_min(r, v) ** 2)
return 2 * norm * v ** 2 / (delta_eps ** 2)
def P_eps(self):
"""Calculate the PDF d{P}/d{eps}"""
return (
np.sqrt(2)
* np.pi ** 3
* (G_N * self.m1) ** 3
* self.f_eps
/ self.eps_grid ** 2.5
)
    def calc_delta_eps(self, r: float, v: float, n_kick: int = 1) -> list:
        """ Calculate average delta_eps integrated over different bins (and the corresponding
        fraction of particles which scatter with that delta_eps).

        Returns (eps_avg, frac): the PDF-weighted mean kick per bin and the fraction of
        scatterings falling in each of the n_kick bins between eps_min and eps_max.
        """
        eps_min = self.eps_min(r, v)
        eps_max = self.eps_max(r, v)
        # Normalisation of the kick PDF P(delta_eps) = 2*norm*v^2/delta_eps^2 (see P_delta_eps).
        norm = self.b_90(r, v) ** 2 / (self.b_max(r, v) ** 2 - self.b_min(r, v) ** 2)
        eps_edges = np.linspace(eps_min, eps_max, n_kick + 1)
        def F_norm(eps):
            # Antiderivative of the kick PDF — differences give per-bin fractions.
            return -norm * 2 * v ** 2 / (eps)
        def F_avg(eps):
            # Antiderivative of eps * PDF — differences give per-bin (unnormalised) means.
            return -norm * 2 * v ** 2 * np.log(eps)
        frac = np.diff(F_norm(eps_edges))
        eps_avg = np.diff(F_avg(eps_edges)) / frac
        return eps_avg, frac
    def dEdt_DF(self, r: float, v_orb: float = -1, v_cut: float = -1, average: bool = False) -> float:
        """Rate of change of energy due to DF (km/s)^2 s^-1 M_sun.

        Parameters:
            - r is the radial position of the perturbing body [pc]
            - v_orb the velocity [km/s] of the body, when not given assume circular Keplerian orbits.
            - v_cut (optional), only scatter with particles slower than v_cut [km/s]
              defaults to v_max(r) (i.e. all particles)
            - average determines whether to average over different radii
              (average = False is default and should be correct).
        """
        if v_orb < 0: v_orb = np.sqrt(G_N * (self.m1 + self.m2) / r)  # [km/s]
        if average:
            warnings.warn(
                "Setting 'average = True' is not necessarily the right thing to do..."
            )
            # r-weighted average of the local density over +/- one b_max around r (3 samples).
            r_list = r + np.linspace(-1, 1, 3) * self.b_max(r, v_orb)
            rho_list = np.array([self.rho(r1, v_cut) for r1 in r_list])
            # NOTE(review): np.trapz was removed in NumPy 2.0 (np.trapezoid is the replacement).
            rho_eff = np.trapz(rho_list * r_list, r_list) / np.trapz(r_list, r_list)
        else:
            rho_eff = self.rho(r, v_cut)
        # dE/dt proportional to G^2 * m2 * (m2 + mDM) * rho_eff * ln(Lambda) / v_orb;
        # the final /pc_to_km converts the pc/km unit mismatch.
        return 4 *np.pi * G_N **2 * self.m2 *(self.m2 +self.mDM) * rho_eff * np.log(self.Lambda(r, v_orb)) / v_orb /pc_to_km  # [km]
def E_orb(self, a: float) -> float:
""" The orbital energy of the binary system at semi-major axis [pc]. """
return -0.5 * G_N * (self.m1 + self.m2) / a
def T_orb(self, a: float) -> float:
""" The orbital period of the binary system at semi-major axis [pc]. """
return (2 * np.pi * np.sqrt(pc_to_km ** 2 * a ** 3 / (G_N * (self.m1 + self.m2))) ) # [s]
    def interpolate_DF(self, eps_old, correction = 1):
        """ Internal function for interpolating the DF on df_plus calculations.

        eps_grid decreases with index (it is psi(r) on an ascending r-grid), so the
        [::-1] reversals below feed np.interp the ascending x-array it requires; the
        final [::-1] restores the caller's ordering.
        """
        # Distribution of particles before they scatter
        if hasattr(correction, "__len__"):
            # Array-valued correction: weight f_eps point-wise before interpolating.
            f_old = np.interp(
                eps_old[::-1],
                self.eps_grid[::-1],
                self.f_eps[::-1] * correction[::-1],
                left=0,
                right=0,
            )[::-1]
        else:
            f_old = np.interp(
                eps_old[::-1], self.eps_grid[::-1], self.f_eps[::-1], left=0, right=0
            )[::-1]
        return f_old
def delta_eps_of_b(self, r2: float, v_orb: float, b: float) -> float:
""" The change of energy based on the impact parameter of the scattering. """
b90 = self.b_90(r2, v_orb) # [pc]
return -2 * v_orb ** 2 * (1 + b**2 / b90**2) ** -1
# ---------------------
# ----- df/dt ----
# ---------------------
    def df_minus(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = 1) -> np.array:
        """Particles to remove from the distribution function at energy E.

        Integrates the scattering rate over impact parameters (n_kick quadrature points)
        and returns the per-orbit depletion of f on the energy grid.

        NOTE(review): when n_kick == 1, b_list is never assigned and the zip below would
        raise NameError; callers (df) always pass n_kick = N_KICK > 1 — confirm before
        using n_kick = 1.
        """
        if v_cut < 0: v_cut = self.v_max(r0)
        df = np.zeros(N_GRID)
        # Calculate sizes of kicks and corresponding weights for integration
        if n_kick == 1:  # Replace everything by the average if n_kick = 1
            delta_eps_list = (
                -2 * v_orb ** 2 * np.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
            )
            frac_list = (1,)
        else:
            b_list = np.geomspace(self.b_min(r0, v_orb), self.b_max(r0, v_orb), n_kick)
            delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
            # Step size for trapezoidal integration
            step = delta_eps_list[1:] - delta_eps_list[:-1]
            step = np.append(step, 0)
            step = np.append(0, step)
            # Make sure that the integral is normalised correctly
            renorm = np.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
            frac_list = 0.5 * (step[:-1] + step[1:]) / renorm
        # Sum over the kicks
        for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
            # Define which energies are allowed to scatter
            mask = (self.eps_grid > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2) & (
                self.eps_grid < self.psi(r0) * (1 + b / r0)
            )
            r_eps = G_N * self.m1 / self.eps_grid[mask]
            r_cut = G_N * self.m1 / (self.eps_grid[mask] + 0.5 * v_cut ** 2)
            # Clip arccos arguments just inside [-1, 1] to avoid NaNs at the boundaries.
            L1 = np.minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
            alpha1 = np.arccos(L1)
            L2 = np.maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
            alpha2 = np.arccos(L2)
            # Elliptic parameter; m > 1 requires the alternative integral ellipeinc_alt.
            m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
            mask1 = (m <= 1) & (alpha2 > alpha1)
            mask2 = (m > 1) & (alpha2 > alpha1)
            N1 = np.zeros(len(m))
            if np.any(mask1):
                N1[mask1] = ellipe(m[mask1]) - ellipeinc(
                    (np.pi - alpha2[mask1]) / 2, m[mask1]
                )
            if np.any(mask2):
                N1[mask2] = ellipeinc_alt((np.pi - alpha1[mask2]) / 2, m[mask2])
            df[mask] += (
                -frac
                * self.f_eps[mask]
                * (1 + b ** 2 / self.b_90(r0, v_orb) ** 2) ** 2
                * np.sqrt(1 - r0 / r_eps + b / r0)
                * N1
            )
        norm = (
            2
            * np.sqrt(2 * (self.psi(r0)))
            * 4
            * np.pi ** 2
            * r0
            * (self.b_90(r0, v_orb) ** 2 / (v_orb) ** 2)
        )
        result = norm * df / self.DoS
        # Zero out energies at/inside the ISCO: those particles are considered lost.
        result[self.eps_grid >= 0.9999 *self.psi(self.r_isco)] *= 0
        return result
    def df_plus(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = 1, correction = 1) -> np.array:
        """Particles to add back into distribution function from E - dE -> E.

        Mirror of df_minus: particles scattered at energy eps - delta_eps repopulate eps.
        The optional `correction` (scalar or array) down-weights the source DF (see
        interpolate_DF and delta_f).

        NOTE(review): as in df_minus, n_kick == 1 leaves b_list unassigned and the zip
        below would raise NameError; callers always pass n_kick = N_KICK > 1.
        """
        if v_cut < 0: v_cut = self.v_max(r0)
        df = np.zeros(N_GRID)
        # Calculate sizes of kicks and corresponding weights for integration
        if n_kick == 1:  # Replace everything by the average if n_kick = 1
            delta_eps_list = (
                -2 * v_orb ** 2 * np.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
            )
            frac_list = (1,)
        else:
            b_list = np.geomspace(self.b_min(r0, v_orb), self.b_max(r0, v_orb), n_kick)
            delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
            # Step size for trapezoidal integration
            step = delta_eps_list[1:] - delta_eps_list[:-1]
            step = np.append(step, 0)
            step = np.append(0, step)
            # Make sure that the integral is normalised correctly
            renorm = np.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
            frac_list = 0.5 * (step[:-1] + step[1:]) / renorm
        # Sum over the kicks
        for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
            # Value of specific energy before the kick
            eps_old = self.eps_grid - delta_eps
            # Define which energies are allowed to scatter
            mask = (eps_old > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2) & (
                eps_old < self.psi(r0) * (1 + b / r0)
            )
            # Sometimes, this mask has no non-zero entries
            if np.any(mask):
                r_eps = G_N * self.m1 / eps_old[mask]
                r_cut = G_N * self.m1 / (eps_old[mask] + 0.5 * v_cut ** 2)
                # Distribution of particles before they scatter
                f_old = self.interpolate_DF(eps_old[mask], correction)
                # Clip arccos arguments just inside [-1, 1] to avoid NaNs at the boundaries.
                L1 = np.minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
                alpha1 = np.arccos(L1)
                L2 = np.maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
                alpha2 = np.arccos(L2)
                # Elliptic parameter; m > 1 requires the alternative integral ellipeinc_alt.
                m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
                mask1 = (m <= 1) & (alpha2 > alpha1)
                mask2 = (m > 1) & (alpha2 > alpha1)
                N1 = np.zeros(len(m))
                if np.any(mask1):
                    N1[mask1] = ellipe(m[mask1]) - ellipeinc(
                        (np.pi - alpha2[mask1]) / 2, m[mask1]
                    )
                if np.any(mask2):
                    N1[mask2] = ellipeinc_alt(
                        (np.pi - alpha1[mask2]) / 2, m[mask2]
                    )  # - ellipeinc_alt((np.pi - alpha2[mask2])/2, m[mask2])
                df[mask] += (
                    frac
                    * f_old
                    * (1 + b ** 2 / self.b_90(r0, v_orb) ** 2) ** 2
                    * np.sqrt(1 - r0 / r_eps + b / r0)
                    * N1
                )
        norm = (
            2
            * np.sqrt(2 * (self.psi(r0)))
            * 4
            * np.pi ** 2
            * r0
            * (self.b_90(r0, v_orb) ** 2 / (v_orb) ** 2)
        )
        result = norm * df / self.DoS
        # Zero out energies at/inside the ISCO: those particles are considered lost.
        result[self.eps_grid >= 0.9999 *self.psi(self.r_isco)] *= 0
        return result
def dEdt_ej(self, r0: float, v_orb: float, v_cut: float = -1, n_kick: int = N_KICK, correction = np.ones(N_GRID)):
"""Calculate carried away by particles which are completely unbound.
Parameters:
- r0 : radial position of the perturbing body [pc]
- v_orb: orbital velocity [km/s]
- v_cut: optional, only scatter with particles slower than v_cut [km/s]
defaults to v_max(r) (i.e. all particles)
- n_kick: optional, number of grid points to use when integrating over
Delta-eps (defaults to N_KICK = 100).
"""
if v_cut < 0: v_cut = self.v_max(r0)
T_orb = (2 * np.pi * r0 * pc_to_km) / v_orb
dE = np.zeros(N_GRID)
# Calculate sizes of kicks and corresponding weights for integration
if n_kick == 1: # Replace everything by the average if n_kick = 1
delta_eps_list = (
-2 * v_orb ** 2 * np.log(1 + self.Lambda(r0, v_orb) ** 2) / self.Lambda(r0, v_orb) ** 2,
)
frac_list = (1,)
else:
b_list = np.geomspace(self.b_min(r0, v_orb), self.b_max(r0, v_orb), n_kick)
delta_eps_list = self.delta_eps_of_b(r0, v_orb, b_list)
# Step size for trapezoidal integration
step = delta_eps_list[1:] - delta_eps_list[:-1]
step = np.append(step, 0)
step = np.append(0, step)
# Make sure that the integral is normalised correctly
renorm = np.trapz(self.P_delta_eps(r0, v_orb, delta_eps_list), delta_eps_list)
frac_list = 0.5 * (step[:-1] + step[1:]) / renorm
# Sum over the kicks
for delta_eps, b, frac in zip(delta_eps_list, b_list, frac_list):
# Maximum impact parameter which leads to the ejection of particles
b_ej_sq = self.b_90(r0, v_orb) ** 2 * ((2 * v_orb ** 2 / self.eps_grid) - 1)
# Define which energies are allowed to scatter
mask = (
(self.eps_grid > self.psi(r0) * (1 - b / r0) - 0.5 * v_cut ** 2)
& (self.eps_grid < self.psi(r0) * (1 + b / r0))
& (b ** 2 < b_ej_sq)
)
r_eps = G_N * self.m1 / self.eps_grid[mask]
r_cut = G_N * self.m1 / (self.eps_grid[mask] + 0.5 * v_cut ** 2)
if np.any(mask):
L1 = np.minimum((r0 - r0 ** 2 / r_eps) / b, 0.999999)
alpha1 = np.arccos(L1)
L2 = np.maximum((r0 - r0 ** 2 / r_cut) / b, -0.999999)
alpha2 = np.arccos(L2)
m = (2 * b / r0) / (1 - (r0 / r_eps) + b / r0)
mask1 = (m <= 1) & (alpha2 > alpha1)
mask2 = (m > 1) & (alpha2 > alpha1)
N1 = np.zeros(len(m))
if np.any(mask1):
N1[mask1] = ellipe(m[mask1]) - ellipeinc(
(np.pi - alpha2[mask1]) / 2, m[mask1]
)
if | np.any(mask2) | numpy.any |
#!/usr/bin/env python
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generates target corner detection files from a set of bagfiles in
a directory containing images. Optionally prunes overlapping detections
to help prevent having too many detections in one area and potentially
biasing the calibrator.
"""
import argparse
import os
import sys
import aslam_cv_backend as acvb
import cv2
import get_bags_with_topic
import kalibr_camera_calibration as kcc
import kalibr_common as kc
import numpy as np
class Corner:
    """One detected calibration-target corner.

    Pairs a corner id with its 2D position in the target frame and its 2D
    pixel position in the image frame, both stored as numpy arrays.
    """

    def __init__(self, corner_id, target_corner, image_corner):
        self.id = corner_id
        # Keep only the planar (x, y) components of each corner location.
        self.target_corner = np.array((target_corner[0], target_corner[1]))
        self.image_corner = np.array((image_corner[0], image_corner[1]))
class Corners:
    """The set of target-corner detections from a single observation.

    Stores a mapping from corner id to Corner so two observations can be
    compared corner-by-corner.
    """

    def __init__(self, observation):
        # Build the id -> Corner map from the observation's parallel lists
        # of target-frame corners, image-frame corners, and corner ids.
        self.id_corner_map = {}
        target_corners = observation.getCornersTargetFrame()
        image_corners = observation.getCornersImageFrame()
        ids = observation.getCornersIdx()
        for i in range(len(target_corners)):
            corner = Corner(ids[i], target_corners[i], image_corners[i])
            self.id_corner_map[corner.id] = corner

    def similar(self, other_corners, threshold):
        """Returns True if both observations saw the same corner ids and the
        mean image-space distance between matching corners is below threshold.

        Args:
            other_corners: Corners instance to compare against.
            threshold: mean pixel distance below which the two detections are
                considered redundant.
        """
        # Both observations must have detected exactly the same corner ids.
        if not set(self.id_corner_map.keys()) == set(
            other_corners.id_corner_map.keys()
        ):
            return False
        norm_sums = 0
        for corner_id in self.id_corner_map.keys():
            # Compute the pixel-space offset once and accumulate its Euclidean
            # norm (the original computed this difference twice, once into an
            # unused local).
            image_diff = (
                self.id_corner_map[corner_id].image_corner
                - other_corners.id_corner_map[corner_id].image_corner
            )
            norm_sums += np.linalg.norm(image_diff)
        mean_norm = norm_sums / float(len(self.id_corner_map.keys()))
        if mean_norm < threshold:
            print(
                "Ignoring image, mean "
                + str(mean_norm)
                + " below threshold "
                + str(threshold)
            )
            return True
        return False
class AddedCorners:
    """Tracks corner sets already accepted, to prune overlapping detections.

    A new detection is considered redundant when its mean image-space distance
    to any previously added detection falls below the configured threshold.
    """

    def __init__(self, threshold):
        self.corners = []
        self.threshold = threshold

    def add_corners(self, corners):
        """Records an accepted Corners detection for future comparisons."""
        self.corners.append(corners)

    def redundant(self, new_corners):
        """Returns True if new_corners is too similar to any recorded set."""
        # any() is False on an empty history, so no explicit length guard
        # is needed.
        return any(
            previous.similar(new_corners, self.threshold)
            for previous in self.corners
        )
def save_corners(observation, filename):
    """Writes the observation's corner detections to a text file.

    Each output line holds five %0.17g-formatted values: corner id,
    target-frame x, target-frame y, image-frame x, image-frame y.
    """
    target_corners = observation.getCornersTargetFrame()
    image_corners = observation.getCornersImageFrame()
    ids = observation.getCornersIdx()
    with open(filename, "w") as corners_file:
        for i, target in enumerate(target_corners):
            image = image_corners[i]
            corners_file.write(
                "%0.17g %0.17g %0.17g %0.17g %0.17g\n"
                % (ids[i], target[0], target[1], image[0], image[1])
            )
def save_images_from_dataset_with_target_detections(
dataset, detector, output_directory, added_corners
):
for timestamp, image in dataset.readDataset():
success, observation = detector.findTargetNoTransformation(
timestamp, | np.array(image) | numpy.array |
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Some math functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import numpy as np
import scipy.linalg as la
from ..hyp_defs import float_cpu
def logdet_pdmat(A):
    """Log determinant of a positive definite matrix.

    Uses the Cholesky factorization A = R'R, so that
    log|A| = 2 * sum(log(diag(R))).
    """
    assert A.shape[0] == A.shape[1]
    chol_factor = la.cholesky(A)
    return 2 * np.log(np.diag(chol_factor)).sum()
def invert_pdmat(A, right_inv=False, return_logdet=False, return_inv=False):
    """Inversion of positive definite matrices.

    Returns lambda function f that multiplies the inverse of A times a vector.

    Args:
        A: Positive definite matrix
        right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
        return_logdet: If True, it also returns the log determinant of A.
        return_inv: If True, it also returns A^{-1}

    Returns:
        List r where r[0] is a lambda function that multiplies A^{-1} times a
        vector/matrix and r[1] is the upper-triangular Cholesky factor of A,
        optionally followed by the log determinant of A (if return_logdet)
        and by A^{-1} (if return_inv).
    """
    assert(A.shape[0] == A.shape[1])
    # Upper-triangular Cholesky factor: A = R' R.
    R = la.cholesky(A, lower=False)

    # cho_solve reuses the factorization, so each application costs only
    # two triangular solves.
    if right_inv:
        fh = lambda x: la.cho_solve((R, False), x.T).T
    else:
        fh = lambda x: la.cho_solve((R, False), x)

    r = [fh, R]
    if return_logdet:
        # log|A| = 2 * sum(log(diag(R)))
        r.append(2 * np.sum(np.log(np.diag(R))))
    if return_inv:
        # A^{-1} obtained by solving against the identity.
        r.append(fh(np.eye(A.shape[0])))
    return r
def invert_trimat(A, lower=False, right_inv=False, return_logdet=False, return_inv=False):
    """Inversion of triangular matrices.

    Returns lambda function f that multiplies the inverse of A times a vector.

    Args:
        A: Triangular matrix.
        lower: if True A is lower triangular, else A is upper triangular.
        right_inv: If False, f(v)=A^{-1}v; if True f(v)=v' A^{-1}
        return_logdet: If True, it also returns the log determinant of A.
        return_inv: If True, it also returns A^{-1}

    Returns:
        Lambda function that multiplies A^{-1} times vector (bare, when no
        extras are requested; otherwise as the first element of a list).
        Log determinant of A
        A^{-1}
    """
    # v' A^{-1} == (A'^{-1} v')' ; transposing A flips its triangularity.
    if right_inv:
        solve = lambda x: la.solve_triangular(A.T, x.T, lower=not lower).T
    else:
        solve = lambda x: la.solve_triangular(A, x, lower=lower)

    # Without extras the caller gets the bare lambda, not a list.
    if not (return_logdet or return_inv):
        return solve

    result = [solve]
    if return_logdet:
        # The determinant of a triangular matrix is the product of its diagonal.
        result.append(np.sum(np.log(np.diag(A))))
    if return_inv:
        result.append(solve(np.eye(A.shape[0])))
    return result
def softmax(r, axis=-1):
"""
Returns:
y = \exp(r)/\sum(\exp(r))
"""
max_r=np.max(r, axis=axis, keepdims=True)
r=np.exp(r-max_r)
r/= | np.sum(r, axis=axis, keepdims=True) | numpy.sum |
Subsets and Splits