import os
from collections import namedtuple
from tqdm import tqdm
import numpy as np
import freesasa as fs
class GraphCreator:
def __init__(self, pdb_file_list, graph_label_dict):
""" Create graphs with the provided pdb files and labels.
Parameters:
pdb_file_list (list): list of pdb file paths
graph_label_dict (dict): dictionary with pdb ids as keys and ground
truth labels as values
"""
self.pdb_file_list = pdb_file_list
self.graph_label_dict = graph_label_dict
    def get_adjacency_matrix(self, pfh_class, starting_number):
        """ Serialise the neighbour lists into edge rows ("i, j" per line),
        offsetting node indices by starting_number so several graphs can be
        concatenated into a single adjacency file.
        """
        edges = []
        for d in pfh_class.within_range:
            for k in d:
                edges.append(list(map(int, k.split("/"))))
        lines = []
        for e in edges:
            e = [idx + starting_number for idx in e]
            lines.append(", ".join(map(str, e)))
        return "".join(line + "\n" for line in lines)
def save_graph(self,
file_prefix: str,
cutoff=10,
save_path="."):
"""Convert pdb files to graph
"""
save_path = os.path.join(save_path, "cutoff"+str(cutoff))
os.makedirs(save_path, exist_ok=True)
a = open(os.path.join(save_path, file_prefix+"_A.txt"), "w")
idc = open(
os.path.join(save_path, file_prefix+"_graph_indicator.txt"), "w")
g_label = open(
os.path.join(save_path, file_prefix+"_graph_labels.txt"), "w")
n_label = open(
os.path.join(save_path, file_prefix+"_node_labels.txt"), "w")
starting_number = 1
pdb_idx = 1
for pdb in tqdm(self.pdb_file_list):
if not (pdb.endswith(".pdb") or pdb.endswith(".cif")):
continue
try:
pdb_id = os.path.basename(pdb).split(".")[0].upper()
label = self.graph_label_dict[pdb_id]
except KeyError:
print(f"{pdb_id} label not found.")
continue
pdb_pfh = PDBtoPFH(pdb, cutoff=cutoff)
            try:
                pdb_pfh.get_attributes()
            except Exception:
                # covers both the AssertionError raised on Ca/C count
                # mismatches and any parsing/SASA failure
                continue
a.write(self.get_adjacency_matrix(pdb_pfh, starting_number))
idc.write((str(pdb_idx)+"\n")*len(pdb_pfh.all_ca))
g_label.write(str(label)+"\n")
for attr in pdb_pfh.ca_attributes:
n_label.write(str(attr)[1:-1]+"\n")
starting_number += len(pdb_pfh.all_ca)
pdb_idx += 1
a.close()
idc.close()
g_label.close()
n_label.close()
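# Minimal usage sketch (added illustration; the file paths and label
# dictionary below are hypothetical placeholders, not from the original
# module).
def _demo_graph_creation():
    pdb_files = ["structures/1ABC.pdb", "structures/2XYZ.pdb"]
    labels = {"1ABC": 0, "2XYZ": 1}
    creator = GraphCreator(pdb_files, labels)
    # Writes PDBSET_A.txt, PDBSET_graph_indicator.txt,
    # PDBSET_graph_labels.txt and PDBSET_node_labels.txt under graphs/cutoff10/
    creator.save_graph(file_prefix="PDBSET", cutoff=10, save_path="graphs")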
class PDBtoPFH():
def __init__(self, file_path, cutoff=7):
self.file_path = file_path
self.cutoff = cutoff
self.resi_to_int = {
"ALA": 0,
"CYS": 1,
"ASP": 2,
"GLU": 3,
"PHE": 4,
"GLY": 5,
"HIS": 6,
"ILE": 7,
"LYS": 8,
"LEU": 9,
"MET": 10,
"ASN": 11,
"PRO": 12,
"GLN": 13,
"ARG": 14,
"SER": 15,
"THR": 16,
"VAL": 17,
"TRP": 18,
"TYR": 19,
"NAA": 20 # not a amino acid
}
self.Atom = namedtuple(
"atom",
"atom_id atom_name residue_id residue_name x y z")
def get_attributes(self):
# read pdb file
with open(self.file_path, "r") as f:
self.data = f.readlines()
        # calculate solvent accessibility with FreeSASA
        self.solvent_access = fs.calc(fs.Structure(self.file_path))
        self._clean_data()
        self._ca_attributes()
        self._distance_to_others()
        self._find_in_range()
    # clean data, save each ATOM record as an Atom namedtuple
    def _clean_data(self):
        self.cl_data = list()
        for d in self.data:
            if d.startswith("ENDMDL"):
                # stop at the first model boundary so multi-model files
                # contribute only one set of coordinates (this test must run
                # before the ATOM filter, or it is never reached)
                break
            if not d.startswith("ATOM"):
                continue
try:
atom_id = int(d[6:11].strip())
res_id = int(d[22:26].strip())
atom_name = d[12:16].strip()
res_name = d[17:20].strip()
x = float(d[30:38])
y = float(d[38:46])
z = float(d[46:54])
atom = self.Atom(
atom_id=atom_id, atom_name=atom_name, residue_id=res_id,
residue_name=res_name, x=x, y=y, z=z
)
self.cl_data.append(atom)
except ValueError:
continue
# get the coordinates of all C-alpha
def _get_ca(self):
self.all_ca = []
self.ca_res = []
self.ca_solv = []
for atom in self.cl_data:
if atom.atom_name.upper() == "CA":
try:
self.ca_res.append(
self.resi_to_int[atom.residue_name.upper()])
except KeyError:
self.ca_res.append(self.resi_to_int["NAA"])
try:
self.ca_solv.append(
self.solvent_access.atomArea(atom.atom_id))
except Exception:
self.ca_solv.append(0.0)
self.all_ca.append([atom.x, atom.y, atom.z])
def _get_c(self):
self.all_c = []
for atom in self.cl_data:
if atom.atom_name.upper() == "C":
self.all_c.append([atom.x, atom.y, atom.z])
def _get_n(self):
self.all_n = []
for atom in self.cl_data:
if atom.atom_name.upper() == "N":
self.all_n.append([atom.x, atom.y, atom.z])
# ### calculate the distance of all the C-alpha to other C-alpha
def distance(self, start, end):
st = np.array(start)
ed = np.array(end)
return np.linalg.norm(st-ed)
def _distance_to_others(self):
self.dist_to_others = []
for ca_st in (self.all_ca):
dist_st_ed = []
for ca_ed in (self.all_ca):
dist_st_ed.append(self.distance(ca_st, ca_ed))
self.dist_to_others.append(dist_st_ed)
# ### Find the C-alpha within the range of CUTOFF
def _find_in_range(self):
self.within_range = []
for i, ds in enumerate(self.dist_to_others):
neighbors = []
for j, d in enumerate(ds):
if d < self.cutoff and d > 0:
key = str(i) + "/" + str(j)
neighbors.append(key)
self.within_range.append(neighbors)
# ### calculate the "surface norms" of all the C-alpha
def _calculate_norms(self):
try:
assert(len(self.all_ca)==len(self.all_c))
except AssertionError:
print(f"{self.file_path} has different numbers of Ca and C.")
raise
all_ca = np.array(self.all_ca, dtype=np.float32)
all_c = np.array(self.all_c, dtype=np.float32)
self.norms = all_c - all_ca
def _calculate_surf_norm(self):
try:
assert(len(self.all_ca)==len(self.all_c))
except AssertionError:
print(f"{self.file_path} has different numbers of Ca and C.")
raise
all_ca = np.array(self.all_ca, dtype=np.float32)
all_c = np.array(self.all_c, dtype=np.float32)
        all_n = np.array(self.all_n, dtype=np.float32)
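# Standalone illustration (synthetic coordinates; added, not part of the
# class above) of the cutoff-neighbour logic in _find_in_range: every pair of
# C-alphas strictly closer than the cutoff, excluding self-pairs (d > 0),
# becomes a directed "i/j" key.
#
#   coords = np.array([[0., 0., 0.], [1., 0., 0.], [10., 0., 0.]])
#   dmat = np.linalg.norm(coords[:, None] - coords[None, :], axis=-1)
#   neighbors = [[f"{i}/{j}" for j, dij in enumerate(row) if 0 < dij < 7]
#                for i, row in enumerate(dmat)]
#   # neighbors == [['0/1'], ['1/0'], []]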
import numpy as np
def lte(r,r0,n):
return 0 <= (r-r0).dot(n)
def lt(r,r0,n):
return 0 < (r-r0).dot(n)
def tilted_square_transformations(n,m,a_1=None,a_2=None):
if n < 1 or m < 1:
raise ValueError("n and m must be >= 1")
def test_r(r):
return lte(r,r0,L2) and lte(r,r0,L1) and lt(r,r1,-L2) and lt(r,r1,-L1)
L1=np.array([n,m])
L2=np.array([m,-n])
r0 = np.array([0,0])
r1 = L1+L2
x = np.arange(0,n+m+1,1)
x = np.kron(x,np.ones_like(x))
y = np.arange(-n,m+1,1)
y = np.kron(np.ones_like(y),y)
r_list = np.vstack((x,y)).T
r_lat = np.array([r for r in r_list[:] if test_r(r)])
arg = np.argsort(r_lat[:,0]+1j*r_lat[:,1])
r_lat = r_lat[arg].copy()
if a_1 is None:
a_1 = np.array([0,1])
if a_2 is None:
a_2 = np.array([1,0])
    Pr = np.array([[0, -1], [1, 0]])
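# Note (added illustration, not from the original source): Pr is the
# 90-degree rotation matrix, so rotating L1 = (n, m) gives Pr @ L1 = (-m, n),
# and -(Pr @ L1) = (m, -n) reproduces L2. For n, m = 2, 1:
#   Pr @ [2, 1] == [-1, 2]  and  -(Pr @ [2, 1]) == [1, -2] == L2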
# -*- coding: utf8 -*-
'''This module implements techniques derived from the pGlyco2
FDR estimation procedure described in:
[1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., … <NAME>.-Y.
(2017). pGlyco 2.0 enables precision N-glycoproteomics with comprehensive quality
control and one-step mass spectrometry for intact glycopeptide identification.
Nature Communications, 8(1), 438. https://doi.org/10.1038/s41467-017-00535-2
[2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., … <NAME>. (2016).
pGlyco: a pipeline for the identification of intact N-glycopeptides by using HCD-
and CID-MS/MS and MS3. Scientific Reports, 6(April), 25102. https://doi.org/10.1038/srep25102
'''
import numpy as np
try:
from matplotlib import pyplot as plt
except ImportError:
pass
from glypy.utils import Enum
from glycan_profiling.task import TaskBase
from glycan_profiling.tandem.target_decoy import NearestValueLookUp, TargetDecoyAnalyzer
from glycan_profiling.tandem.spectrum_match import SpectrumMatch
from glycan_profiling.tandem.glycopeptide.core_search import approximate_internal_size_of_glycan
from .mixture import GammaMixture, GaussianMixtureWithPriorComponent
from .journal import SolutionSetGrouper
def noop(*args, **kwargs):
pass
class GlycopeptideFDREstimationStrategy(Enum):
multipart_gamma_gaussian_mixture = 0
peptide_fdr = 1
glycan_fdr = 2
glycopeptide_fdr = 3
GlycopeptideFDREstimationStrategy.multipart_gamma_gaussian_mixture.add_name("multipart")
GlycopeptideFDREstimationStrategy.multipart_gamma_gaussian_mixture.add_name("joint")
GlycopeptideFDREstimationStrategy.peptide_fdr.add_name("peptide")
GlycopeptideFDREstimationStrategy.glycan_fdr.add_name("glycan")
class FiniteMixtureModelFDREstimator(object):
def __init__(self, decoy_scores, target_scores):
self.decoy_scores = np.array(decoy_scores)
self.target_scores = np.array(target_scores)
self.decoy_mixture = None
self.target_mixture = None
self.fdr_map = None
def log(self, message):
print(message)
def estimate_gamma(self, max_components=10):
models = []
bics = []
n = len(self.decoy_scores)
np.random.seed(n)
if n < 10:
self.log("Too few decoy observations")
self.decoy_mixture = GammaMixture([1.0], [1.0], [1.0])
return self.decoy_mixture
for i in range(1, max_components + 1):
self.log("Fitting %d Components" % (i,))
model = GammaMixture.fit(self.decoy_scores, i)
bic = model.bic(self.decoy_scores)
models.append(model)
bics.append(bic)
self.log("BIC: %g" % (bic,))
i = np.argmin(bics)
self.log("Selected %d Components" % (i + 1,))
self.decoy_mixture = models[i]
return self.decoy_mixture
def estimate_gaussian(self, max_components=10):
models = []
bics = []
n = len(self.target_scores)
np.random.seed(n)
if n < 10:
self.log("Too few target observations")
self.target_mixture = GaussianMixtureWithPriorComponent([1.0], [1.0], self.decoy_mixture, [0.5, 0.5])
return self.target_mixture
for i in range(1, max_components + 1):
self.log("Fitting %d Components" % (i,))
model = GaussianMixtureWithPriorComponent.fit(
self.target_scores, i, self.decoy_mixture, deterministic=True)
bic = model.bic(self.target_scores)
models.append(model)
bics.append(bic)
self.log("BIC: %g" % (bic,))
i = np.argmin(bics)
self.log("Selected %d Components" % (i + 1,))
self.target_mixture = models[i]
return self.target_mixture
    def estimate_posterior_error_probability(self, X):
        return (self.target_mixture.prior.score(X) *
                self.target_mixture.weights[-1] / self.target_mixture.score(X))
def estimate_fdr(self, X):
X_ = np.array(sorted(X, reverse=True))
pep = self.estimate_posterior_error_probability(X_)
# The FDR is the expected value of PEP, or the average PEP in this case.
# The expression below is a cumulative mean (the cumulative sum divided
# by the number of elements in the sum)
fdr = np.cumsum(pep) / np.arange(1, len(X_) + 1)
# Use searchsorted on the ascending ordered version of X_
# to find the indices of the origin values of X, then map
# those into the ascending ordering of the FDR vector to get
# the FDR estimates of the original X
fdr[np.isnan(fdr)] = 1.0
fdr_descending = fdr[::-1]
for i in range(1, fdr_descending.shape[0]):
if fdr_descending[i - 1] < fdr_descending[i]:
fdr_descending[i] = fdr_descending[i - 1]
fdr = fdr_descending[::-1]
fdr = fdr[::-1][np.searchsorted(X_[::-1], X)]
return fdr
def plot_mixture(self, ax=None):
if ax is None:
fig, ax = plt.subplots(1)
X = np.arange(1, max(self.target_scores), 0.1)
ax.plot(X,
np.exp(self.target_mixture.logpdf(X)).sum(axis=1))
for col in np.exp(self.target_mixture.logpdf(X)).T:
ax.plot(X, col, linestyle='--')
ax.hist(self.target_scores, bins=100, density=1, alpha=0.15)
return ax
def plot(self, ax=None):
if ax is None:
fig, ax = plt.subplots(1)
points = np.linspace(
min(self.target_scores.min(), self.decoy_scores.min()),
max(self.target_scores.max(), self.decoy_scores.max()),
10000)
target_scores = np.sort(self.target_scores)
target_counts = [(self.target_scores >= i).sum() for i in points]
decoy_counts = [(self.decoy_scores >= i).sum() for i in points]
fdr = self.estimate_fdr(target_scores)
at_5_percent = np.where(fdr < 0.05)[0][0]
at_1_percent = np.where(fdr < 0.01)[0][0]
line1 = ax.plot(points, target_counts, label='Target', color='blue')
line2 = ax.plot(points, decoy_counts, label='Decoy', color='orange')
ax.vlines(target_scores[at_5_percent], 0, np.max(target_counts), linestyle='--', color='blue', lw=0.75)
        # trailing style arguments reconstructed to mirror the 5% threshold
        # line drawn above
        ax.vlines(target_scores[at_1_percent], 0, np.max(target_counts),
                  linestyle='--', color='blue', lw=0.75)
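# Minimal numeric sketch (added illustration, independent of the classes
# above) of the FDR construction in estimate_fdr: with scores sorted in
# descending order, the FDR at each threshold is the cumulative mean of the
# posterior error probabilities, then made monotone so it never increases as
# the score threshold rises.
def fdr_from_pep(pep_descending):
    pep = np.asarray(pep_descending, dtype=float)
    fdr = np.cumsum(pep) / np.arange(1, pep.size + 1)  # cumulative mean
    # running minimum from the low-score end; equivalent to the reversed-pass
    # loop in FiniteMixtureModelFDREstimator.estimate_fdr
    return np.minimum.accumulate(fdr[::-1])[::-1]

# fdr_from_pep([0.01, 0.02, 0.50]) -> array([0.01, 0.015, 0.17666667])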
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from ogcore.constants import (GROUP_LABELS, VAR_LABELS, ToGDP_LABELS,
CBO_UNITS, DEFAULT_START_YEAR)
import ogcore.utils as utils
from ogcore.utils import Inequality
def plot_aggregates(base_tpi, base_params, reform_tpi=None,
reform_params=None, var_list=['Y', 'C', 'K', 'L'],
plot_type='pct_diff', num_years_to_plot=50,
start_year=DEFAULT_START_YEAR,
vertical_line_years=None,
plot_title=None, path=None):
'''
Create a plot of macro aggregates.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var_list (list): names of variable to plot
plot_type (string): type of plot, can be:
            'pct_diff': plots percentage difference between baseline
                and reform ((reform-base)/base)
'diff': plots difference between baseline and reform
(reform-base)
'levels': plot variables in model units
'cbo': plots variables in levels relative to CBO baseline
projection (only available for macro variables in CBO
long-term forecasts)
num_years_to_plot (integer): number of years to include in plot
start_year (integer): year to start plot
vertical_line_years (list): list of integers for years want
vertical lines at
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of macro aggregates
'''
assert isinstance(start_year, (int, np.integer))
assert (isinstance(num_years_to_plot, int))
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
year_vec = np.arange(start_year, start_year + num_years_to_plot)
start_index = start_year - base_params.start_year
# Check that reform included if doing pct_diff or diff plot
if plot_type == 'pct_diff' or plot_type == 'diff':
assert (reform_tpi is not None)
fig1, ax1 = plt.subplots()
for i, v in enumerate(var_list):
if plot_type == 'pct_diff':
if v in ['r_gov', 'r', 'r_p']:
# Compute just percentage point changes for rates
plot_var = reform_tpi[v] - base_tpi[v]
else:
plot_var = (reform_tpi[v] - base_tpi[v]) / base_tpi[v]
ylabel = r'Pct. change'
plt.plot(year_vec,
plot_var[start_index: start_index +
num_years_to_plot], label=VAR_LABELS[v])
elif plot_type == 'diff':
plot_var = reform_tpi[v] - base_tpi[v]
ylabel = r'Difference (Model Units)'
plt.plot(year_vec,
plot_var[start_index: start_index +
num_years_to_plot], label=VAR_LABELS[v])
elif plot_type == 'levels':
plt.plot(year_vec,
base_tpi[v][start_index: start_index +
num_years_to_plot],
label='Baseline ' + VAR_LABELS[v])
if reform_tpi:
plt.plot(year_vec,
reform_tpi[v][start_index: start_index +
num_years_to_plot],
label='Reform ' + VAR_LABELS[v])
ylabel = r'Model Units'
elif plot_type == 'cbo':
# Need reform and baseline to ensure CBO plot makes sense
assert (reform_tpi is not None)
# read in CBO forecasts
df_cbo = utils.read_cbo_forecast()
# assert variable in cbo data
assert (v in df_cbo.columns)
# assert cbo data has start year and end year
assert (df_cbo.year.min() <= start_year)
assert (df_cbo.year.max() >= start_year + num_years_to_plot)
cbo_data = df_cbo[
(df_cbo['year'] >= start_year) &
(df_cbo['year'] <= start_year +
num_years_to_plot - 1)][v].values
# Plot CBO baseline
plot_var_base = cbo_data
plt.plot(year_vec, plot_var_base, label='Baseline ' +
VAR_LABELS[v])
# Plot change in CBO baseline
pct_change = ((reform_tpi[v] - base_tpi[v]) /
base_tpi[v])[start_index: start_index +
num_years_to_plot]
plot_var_reform = (1 + pct_change) * cbo_data
plt.plot(year_vec, plot_var_reform, label='Reform ' +
VAR_LABELS[v])
# making units labels will not work if multiple variables
# and they are in different units
ylabel = CBO_UNITS[v]
        else:
            raise ValueError(
                "plot_type must be one of 'pct_diff', 'diff', 'levels', 'cbo'")
# vertical markers at certain years
if vertical_line_years:
for yr in vertical_line_years:
plt.axvline(x=yr, linewidth=0.5, linestyle='--', color='k')
plt.xlabel(r'Year $t$')
plt.ylabel(ylabel)
if plot_title:
plt.title(plot_title, fontsize=15)
ax1.set_yticks(ax1.get_yticks().tolist())
vals = ax1.get_yticks()
if plot_type == 'pct_diff':
ax1.set_yticklabels(['{:,.2%}'.format(x) for x in vals])
plt.xlim((base_params.start_year - 1, base_params.start_year +
num_years_to_plot))
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
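# Usage sketch (hypothetical objects; added illustration): percentage changes
# in output and consumption over the first 50 years of the transition path.
#   fig = plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
#                         reform_params=reform_params, var_list=['Y', 'C'],
#                         plot_type='pct_diff', num_years_to_plot=50)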
def ss_3Dplot(base_params, base_ss, reform_params=None, reform_ss=None,
var='bssmat_splus1', plot_type='levels', plot_title=None,
path=None):
'''
Create a 3d plot of household decisions.
Args:
base_params (OG-Core Specifications class): baseline parameters object
base_ss (dictionary): SS output from baseline run
reform_params (OG-Core Specifications class): reform parameters object
reform_ss (dictionary): SS output from reform run
var (string): name of variable to plot
plot_type (string): type of plot, can be:
            'pct_diff': plots percentage difference between baseline
                and reform ((reform-base)/base)
'diff': plots difference between baseline and reform (reform-base)
'levels': plot variables in model units
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of household decisions
'''
if reform_params:
assert(base_params.J == reform_params.J)
assert(base_params.starting_age == reform_params.starting_age)
assert(base_params.ending_age == reform_params.ending_age)
assert(base_params.S == reform_params.S)
domain = np.linspace(base_params.starting_age,
base_params.ending_age, base_params.S)
Jgrid = np.zeros(base_params.J)
for j in range(base_params.J):
Jgrid[j:] += base_params.lambdas[j]
if plot_type == 'levels':
data = base_ss[var].T
elif plot_type == 'diff':
data = (reform_ss[var] - base_ss[var]).T
elif plot_type == 'pct_diff':
data = ((reform_ss[var] - base_ss[var]) / base_ss[var]).T
cmap1 = matplotlib.cm.get_cmap('jet')
X, Y = np.meshgrid(domain, Jgrid)
fig5 = plt.figure()
    ax5 = fig5.add_subplot(projection='3d')
ax5.set_xlabel(r'age-$s$')
ax5.set_ylabel(r'ability type-$j$')
ax5.set_zlabel(r'individual savings $\bar{b}_{j,s}$')
ax5.plot_surface(X, Y, data, rstride=1, cstride=1, cmap=cmap1)
if plot_title:
plt.title(plot_title)
if path:
plt.savefig(path)
else:
return plt
def plot_gdp_ratio(base_tpi, base_params, reform_tpi=None,
reform_params=None, var_list=['D'],
plot_type='levels', num_years_to_plot=50,
start_year=DEFAULT_START_YEAR, vertical_line_years=None,
plot_title=None, path=None):
'''
Create a plot of some variable to GDP.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters object
var_list (list): names of variable to plot
plot_type (string): type of plot, can be:
'diff': plots difference between baseline and reform
(reform-base)
'levels': plot variables in model units
num_years_to_plot (integer): number of years to include in plot
start_year (integer): year to start plot
vertical_line_years (list): list of integers for years want
vertical lines at
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of ratio of a variable to GDP
'''
assert isinstance(start_year, (int, np.integer))
assert (isinstance(num_years_to_plot, int))
if plot_type == 'diff':
assert (reform_tpi is not None)
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
year_vec = np.arange(start_year, start_year + num_years_to_plot)
start_index = start_year - base_params.start_year
fig1, ax1 = plt.subplots()
for i, v in enumerate(var_list):
if plot_type == 'levels':
plot_var_base = (base_tpi[v][:base_params.T] /
base_tpi['Y'][:base_params.T])
if reform_tpi:
plot_var_reform = (reform_tpi[v][:base_params.T] /
reform_tpi['Y'][:base_params.T])
plt.plot(year_vec, plot_var_base[start_index: start_index +
num_years_to_plot],
label='Baseline ' + ToGDP_LABELS[v])
plt.plot(year_vec, plot_var_reform[start_index: start_index +
num_years_to_plot],
label='Reform ' + ToGDP_LABELS[v])
else:
plt.plot(year_vec, plot_var_base[start_index: start_index +
num_years_to_plot],
label=ToGDP_LABELS[v])
else: # if plotting differences in ratios
var_base = (base_tpi[v][:base_params.T] /
base_tpi['Y'][:base_params.T])
var_reform = (reform_tpi[v][:base_params.T] /
reform_tpi['Y'][:base_params.T])
plot_var = var_reform - var_base
plt.plot(year_vec, plot_var[start_index: start_index +
num_years_to_plot],
label=ToGDP_LABELS[v])
ylabel = r'Percent of GDP'
# vertical markers at certain years
if vertical_line_years:
for yr in vertical_line_years:
plt.axvline(x=yr, linewidth=0.5, linestyle='--', color='k')
plt.xlabel(r'Year $t$')
plt.ylabel(ylabel)
if plot_title:
plt.title(plot_title, fontsize=15)
ax1.set_yticks(ax1.get_yticks().tolist())
vals = ax1.get_yticks()
if plot_type == 'levels':
ax1.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
else:
ax1.set_yticklabels(['{:,.2%}'.format(x) for x in vals])
plt.xlim((base_params.start_year - 1, base_params.start_year +
num_years_to_plot))
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
def ability_bar(base_tpi, base_params, reform_tpi,
reform_params, var='n_mat', num_years=5,
start_year=DEFAULT_START_YEAR, plot_title=None, path=None):
'''
Plots percentage changes from baseline by ability group for a
given variable.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
        num_years (integer): number of years to compute changes over
start_year (integer): year to start plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of results by ability type
'''
assert isinstance(start_year, (int, np.integer))
assert isinstance(num_years, (int, np.integer))
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
N = base_params.J
fig, ax = plt.subplots()
ind = np.arange(N) # the x locations for the groups
width = 0.2 # the width of the bars
start_index = start_year - base_params.start_year
omega_to_use = base_params.omega[:base_params.T, :].reshape(
base_params.T, base_params.S, 1)
base_val = (base_tpi[var] * omega_to_use)[
start_index:start_index + num_years, :, :].sum(1).sum(0)
reform_val = (reform_tpi[var] * omega_to_use)[
start_index:start_index + num_years, :, :].sum(1).sum(0)
var_to_plot = (reform_val - base_val) / base_val
ax.bar(ind, var_to_plot * 100, width, bottom=0)
ax.set_xticks(ind + width / 4)
ax.set_xticklabels(list(GROUP_LABELS[base_params.J].values()))
plt.xticks(rotation=45)
plt.ylabel(r'Percentage Change in ' + VAR_LABELS[var])
if plot_title:
plt.title(plot_title, fontsize=15)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig
plt.close()
def ability_bar_ss(base_ss, base_params, reform_ss, reform_params,
var='nssmat', plot_title=None, path=None):
'''
Plots percentage changes from baseline by ability group for a
given variable.
Args:
base_ss (dictionary): SS output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_ss (dictionary): SS output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of results by ability type
'''
N = base_params.J
fig, ax = plt.subplots()
ind = np.arange(N) # the x locations for the groups
width = 0.2 # the width of the bars
base_val = (
base_ss[var] *
base_params.omega_SS.reshape(base_params.S, 1)).sum(0)
reform_val = (
reform_ss[var] *
reform_params.omega_SS.reshape(reform_params.S, 1)).sum(0)
var_to_plot = (reform_val - base_val) / base_val
ax.bar(ind, var_to_plot * 100, width, bottom=0)
ax.set_xticks(ind + width / 4)
ax.set_xticklabels(list(GROUP_LABELS[base_params.J].values()))
plt.xticks(rotation=45)
plt.ylabel(r'Percentage Change in ' + VAR_LABELS[var])
if plot_title:
plt.title(plot_title, fontsize=15)
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig
plt.close()
def tpi_profiles(base_tpi, base_params, reform_tpi=None,
reform_params=None, by_j=True, var='n_mat',
num_years=5, start_year=DEFAULT_START_YEAR, plot_title=None,
path=None):
'''
Plot lifecycle profiles of given variable in the SS.
Args:
        base_tpi (dictionary): TPI output from baseline run
        base_params (OG-Core Specifications class): baseline parameters
            object
        reform_tpi (dictionary): TPI output from reform run
        reform_params (OG-Core Specifications class): reform parameters
            object
        var (string): name of variable to plot
        num_years (integer): number of years to compute changes over
start_year (integer): year to start plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of lifecycle profiles
'''
assert isinstance(start_year, (int, np.integer))
assert isinstance(num_years, (int, np.integer))
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
assert (base_params.S == reform_params.S)
assert (base_params.starting_age == reform_params.starting_age)
assert (base_params.ending_age == reform_params.ending_age)
age_vec = np.arange(base_params.starting_age,
base_params.starting_age + base_params.S)
fig1, ax1 = plt.subplots()
start_idx = start_year - base_params.start_year
end_idx = start_idx + num_years
if by_j:
cm = plt.get_cmap('coolwarm')
ax1.set_prop_cycle(color=[cm(1. * i / 7) for i in range(7)])
for j in range(base_params.J):
plt.plot(age_vec,
base_tpi[var][start_idx: end_idx, :,
j].sum(axis=0) / num_years,
label='Baseline, j = ' + str(j))
if reform_tpi:
plt.plot(age_vec,
reform_tpi[var][start_idx: end_idx, :,
j].sum(axis=0) / num_years,
label='Reform, j = ' + str(j), linestyle='--')
else:
base_var = ((
base_tpi[var][start_idx: end_idx, :, :] *
base_params.lambdas.reshape(1, 1, base_params.J)
).sum(axis=2).sum(axis=0) / num_years)
plt.plot(age_vec, base_var, label='Baseline')
if reform_tpi:
reform_var = ((
reform_tpi[var][start_idx: end_idx, :, :] *
reform_params.lambdas.reshape(1, 1, base_params.J)
).sum(axis=2).sum(axis=0) / num_years)
plt.plot(age_vec, reform_var, label='Reform',
linestyle='--')
plt.xlabel(r'Age')
plt.ylabel(VAR_LABELS[var])
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if plot_title:
plt.title(plot_title, fontsize=15)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
def ss_profiles(base_ss, base_params, reform_ss=None,
reform_params=None, by_j=True, var='nssmat',
plot_data=None,
plot_title=None, path=None):
'''
Plot lifecycle profiles of given variable in the SS.
Args:
base_ss (dictionary): SS output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_ss (dictionary): SS output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
plot_data (array_like): series of data to add to plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of lifecycle profiles
'''
if reform_ss:
assert (base_params.S == reform_params.S)
assert (base_params.starting_age == reform_params.starting_age)
assert (base_params.ending_age == reform_params.ending_age)
age_vec = np.arange(base_params.starting_age,
base_params.starting_age + base_params.S)
fig1, ax1 = plt.subplots()
if by_j:
cm = plt.get_cmap('coolwarm')
ax1.set_prop_cycle(color=[cm(1. * i / 7) for i in range(7)])
for j in range(base_params.J):
plt.plot(age_vec, base_ss[var][:, j],
label='Baseline, j = ' + str(j))
if reform_ss:
plt.plot(age_vec, reform_ss[var][:, j],
label='Reform, j = ' + str(j), linestyle='--')
else:
base_var = (
base_ss[var][:, :] *
base_params.lambdas.reshape(1, base_params.J)).sum(axis=1)
plt.plot(age_vec, base_var, label='Baseline')
if reform_ss:
reform_var = (
reform_ss[var][:, :] *
reform_params.lambdas.reshape(1, reform_params.J)).sum(axis=1)
plt.plot(age_vec, reform_var, label='Reform', linestyle='--')
if plot_data is not None:
plt.plot(age_vec, plot_data, linewidth=2.0, label='Data',
linestyle=':')
plt.xlabel(r'Age')
plt.ylabel(VAR_LABELS[var])
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if plot_title:
plt.title(plot_title, fontsize=15)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
def plot_all(base_output_path, reform_output_path, save_path):
'''
Function to plot all default output plots.
Args:
base_output_path (str): path to baseline results
reform_output_path (str): path to reform results
save_path (str): path to save plots to
Returns:
None: All output figures saved to disk.
'''
# Make directory in case it doesn't exist
utils.mkdirs(save_path)
# Read in data
# Read in TPI output and parameters
base_tpi = utils.safe_read_pickle(
os.path.join(base_output_path, 'TPI', 'TPI_vars.pkl')
)
base_ss = utils.safe_read_pickle(
os.path.join(base_output_path, 'SS', 'SS_vars.pkl')
)
base_params = utils.safe_read_pickle(
os.path.join(base_output_path, 'model_params.pkl')
)
reform_tpi = utils.safe_read_pickle(
os.path.join(reform_output_path, 'TPI', 'TPI_vars.pkl')
)
reform_ss = utils.safe_read_pickle(
os.path.join(reform_output_path, 'SS', 'SS_vars.pkl')
)
reform_params = utils.safe_read_pickle(
os.path.join(reform_output_path, 'model_params.pkl')
)
# Percentage changes in macro vars (Y, K, L, C)
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['Y', 'K', 'L', 'C'], plot_type='pct_diff',
num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Percentage Changes in Macro Aggregates',
path=os.path.join(save_path, 'MacroAgg_PctChange.png'))
# Percentage change in fiscal vars (D, G, TR, Rev)
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['D', 'G', 'TR', 'total_tax_revenue'],
plot_type='pct_diff', num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Percentage Changes in Fiscal Variables',
path=os.path.join(save_path, 'Fiscal_PctChange.png'))
# r and w in baseline and reform -- vertical lines at tG1, tG2
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['r'],
plot_type='levels', num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Real Interest Rates Under Baseline and Reform',
path=os.path.join(save_path, 'InterestRates.png'))
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['w'],
plot_type='levels', num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Wage Rates Under Baseline and Reform',
path=os.path.join(save_path, 'WageRates.png'))
# Debt-GDP in base and reform-- vertical lines at tG1, tG2
plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
var_list=['D'], num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Debt-to-GDP',
path=os.path.join(save_path, 'DebtGDPratio.png'))
# Tax revenue to GDP in base and reform-- vertical lines at tG1, tG2
plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
var_list=['total_tax_revenue'], num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Tax Revenue to GDP',
path=os.path.join(save_path, 'RevenueGDPratio.png'))
# Pct change in c, n, b, y, etr, mtrx, mtry by ability group over 10 years
var_list = ['c_path', 'n_mat', 'bmat_splus1', 'etr_path',
'mtrx_path', 'mtry_path', 'y_before_tax_mat']
title_list = ['consumption', 'labor supply', 'savings',
'effective tax rates',
'marginal tax rates on labor income',
'marginal tax rates on capital income',
'before tax income']
path_list = ['Cons', 'Labor', 'Save', 'ETR', 'MTRx', 'MTRy',
'Income']
for i, v in enumerate(var_list):
ability_bar(base_tpi, base_params, reform_tpi, reform_params,
var=v, num_years=10,
start_year=base_params.start_year,
plot_title='Percentage changes in ' + title_list[i],
path=os.path.join(save_path, 'PctChange_' +
path_list[i] + '.png'))
# lifetime profiles, base vs reform, SS for c, n, b, y - not by j
var_list = ['cssmat', 'nssmat', 'bssmat_splus1', 'etr_ss',
'mtrx_ss', 'mtry_ss']
for i, v in enumerate(var_list):
ss_profiles(base_ss, base_params, reform_ss, reform_params,
by_j=False, var=v,
plot_title='Lifecycle Profile of ' + title_list[i],
path=os.path.join(save_path, 'SSLifecycleProfile_' +
path_list[i] + '.png'))
# lifetime profiles, c, n , b, y by j, separately for base and reform
for i, v in enumerate(var_list):
ss_profiles(base_ss, base_params,
by_j=True, var=v,
plot_title='Lifecycle Profile of ' + title_list[i],
path=os.path.join(save_path, 'SSLifecycleProfile_' +
path_list[i] + '_Baseline.png'))
ss_profiles(reform_ss, reform_params,
by_j=True, var=v,
plot_title='Lifecycle Profile of ' + title_list[i],
path=os.path.join(save_path, 'SSLifecycleProfile_' +
path_list[i] + '_Reform.png'))
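# Usage sketch (hypothetical paths; added illustration): write the full
# default set of comparison figures for a finished baseline/reform pair.
#   plot_all('./OUTPUT_BASELINE', './OUTPUT_REFORM', save_path='./plots')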
def inequality_plot(
base_tpi, base_params, reform_tpi=None, reform_params=None,
var='c_path', ineq_measure='gini', pctiles=None,
plot_type='levels', num_years_to_plot=50,
start_year=DEFAULT_START_YEAR, vertical_line_years=None,
plot_title=None, path=None):
'''
Plot measures of inequality over the time path.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var(string): name of variable to plot
ineq_measure (string): inequality measure to plot, can be:
'gini': Gini coefficient
'var_of_logs': variance of logs
'pct_ratio': percentile ratio
'top_share': top share of total
pctiles (tuple or None): percentiles for percentile ratios
(numerator, denominator) or percentile for top share (not
required for Gini or var_of_logs)
plot_type (string): type of plot, can be:
            'pct_diff': plots percentage difference between baseline
                and reform ((reform-base)/base)
'diff': plots difference between baseline and reform
(reform-base)
'levels': plot variables in model units
num_years_to_plot (integer): number of years to include in plot
start_year (integer): year to start plot
vertical_line_years (list): list of integers for years want
vertical lines at
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of inequality measure
'''
assert isinstance(start_year, (int, np.integer))
assert (isinstance(num_years_to_plot, int))
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
assert ineq_measure in ['gini', 'var_of_logs', 'pct_ratio',
'top_share']
if (ineq_measure == 'pct_ratio') | (ineq_measure == 'top_share'):
assert pctiles
year_vec = np.arange(start_year, start_year + num_years_to_plot)
# Check that reform included if doing pct_diff or diff plot
if plot_type == 'pct_diff' or plot_type == 'diff':
assert (reform_tpi is not None)
fig1, ax1 = plt.subplots()
    base_values = np.zeros(num_years_to_plot)
from equadratures.parameter import Parameter
from equadratures.poly import Poly
from equadratures.basis import Basis
from equadratures.scalers import scaler_minmax, scaler_meanvar, scaler_custom
import equadratures.plot as plot
import numpy as np
import scipy
import scipy.io
from scipy.linalg import orth, sqrtm
from scipy.spatial import ConvexHull
from scipy.special import comb
from scipy.optimize import linprog
import warnings
class Subspaces(object):
""" This class defines a subspaces object. It can be used for polynomial-based subspace dimension reduction.
Parameters
----------
method : str
The method to be used for subspace-based dimension reduction. Two options:
- ``active-subspace``, which uses ideas in [1] and [2] to compute a dimension-reducing subspace with a global polynomial approximant. Gradients evaluations of the polynomial approximation are used to compute the averaged outer product of the gradient covariance matrix. The polynomial approximation in the original full-space can be provided via ``full_space_poly``. Otherwise, it is fit internally to the data provided via ``sample_points`` and ``sample_outputs``.
- ``variable-projection`` [3], where a Gauss-Newton optimisation problem is solved to compute both the polynomial coefficients and its subspace, with the data provided via ``sample_points`` and ``sample_outputs``.
full_space_poly : Poly, optional
An instance of Poly fitted to the full-space data, to use for the AS computation.
sample_points : numpy.ndarray, optional
Array with shape (number_of_observations, dimensions) that corresponds to a set of sample points over the parameter space.
sample_outputs : numpy.ndarray, optional
Array with shape (number_of_observations, 1) that corresponds to model evaluations at the sample points.
subspace_dimension : int, optional
The dimension of the *active* subspace.
param_args : dict, optional
Arguments passed to parameters of the AS polynomial. (see :class:`~equadratures.parameter.Parameter`)
poly_args : dict , optional
Arguments passed to constructing polynomial used for AS computation. (see :class:`~equadratures.poly.Poly`)
dr_args : dict, optional
Arguments passed to customise the VP optimiser. See documentation for :meth:`~equadratures.subspaces.Subspaces._get_variable_projection` in source.
Examples
--------
Obtaining a 2D subspace via active subspaces on user data
>>> mysubspace = Subspaces(method='active-subspace', sample_points=X, sample_outputs=Y)
>>> eigs = mysubspace.get_eigenvalues()
>>> W = mysubspace.get_subspace()[:, :2]
>>> e = mysubspace.get_eigenvalues()
Obtaining a 2D subspace via active subspaces with a Poly object (remember to call set_model() on Poly first)
>>> mysubspace = Subspaces(method='active-subspace', full_space_poly=my_poly)
>>> eigs = mysubspace.get_eigenvalues()
>>> W = mysubspace.get_subspace()[:, :2]
>>> e = mysubspace.get_eigenvalues()
Obtaining a 2D subspace via variable projection on user data
>>> mysubspace = Subspaces(method='variable-projection', sample_points=X, sample_outputs=Y)
>>> W = mysubspace.get_subspace()[:, :2]
References
----------
1. <NAME>., (2015) Active Subspaces: Emerging Ideas for Dimension Reduction in Parameter Studies. SIAM Spotlights.
2. <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2018) Turbomachinery Active Subspace Performance Maps. Journal of Turbomachinery, 140(4), 041003. `Paper <http://turbomachinery.asmedigitalcollection.asme.org/article.aspx?articleid=2668256>`__.
3. <NAME>., <NAME>., (2018) Data-driven Polynomial Ridge Approximation Using Variable Projection. SIAM Journal of Scientific Computing, 40(3), A1566-A1589. `Paper <https://epubs.siam.org/doi/abs/10.1137/17M1117690>`__.
"""
def __init__(self, method, full_space_poly=None, sample_points=None, sample_outputs=None,
subspace_dimension=2, polynomial_degree=2, param_args=None, poly_args=None, dr_args=None):
self.full_space_poly = full_space_poly
self.sample_points = sample_points
self.Y = None # for the zonotope vertices
self.sample_outputs = sample_outputs
self.method = method
self.subspace_dimension = subspace_dimension
self.polynomial_degree = polynomial_degree
my_poly_args = {'method': 'least-squares', 'solver_args': {}}
if poly_args is not None:
my_poly_args.update(poly_args)
self.poly_args = my_poly_args
my_param_args = {'distribution': 'uniform', 'order': self.polynomial_degree, 'lower': -1, 'upper': 1}
if param_args is not None:
my_param_args.update(param_args)
# I suppose we can detect if lower and upper is present to decide between these categories?
bounded_distrs = ['analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian', 'uniform']
unbounded_distrs = ['gaussian', 'normal', 'gumbel', 'logistic', 'students-t', 'studentst']
semi_bounded_distrs = ['chi', 'chi-squared', 'exponential', 'gamma', 'lognormal', 'log-normal', 'pareto', 'rayleigh', 'weibull']
if dr_args is not None:
if 'standardize' in dr_args:
dr_args['standardise'] = dr_args['standardize']
if self.method.lower() == 'active-subspace' or self.method.lower() == 'active-subspaces':
self.method = 'active-subspace'
            if dr_args is not None:
                # dr_args is a dict, so use .get() rather than getattr()
                self.standardise = dr_args.get('standardise', True)
            else:
                self.standardise = True
if self.full_space_poly is None:
# user provided input/output data
N, d = self.sample_points.shape
if self.standardise:
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
param = Parameter(**my_param_args)
                if param_args is not None:
                    # param_args is a dict; check its keys for user-supplied bounds
                    if ('lower' in param_args or 'upper' in param_args) and self.standardise:
                        warnings.warn('Points standardised but parameter range provided. Overriding default ([-1,1])...',
                                      UserWarning)
myparameters = [param for _ in range(d)]
mybasis = Basis("total-order")
mypoly = Poly(myparameters, mybasis, sampling_args={'sample-points': self.std_sample_points,
'sample-outputs': self.sample_outputs},
**my_poly_args)
mypoly.set_model()
self.full_space_poly = mypoly
else:
# User provided polynomial
# Standardise according to distribution specified. Only care about the scaling (not shift)
# TODO: user provided callable with parameters?
user_params = self.full_space_poly.parameters
d = len(user_params)
self.sample_points = self.full_space_poly.get_points()
if self.standardise:
scale_factors = np.zeros(d)
centers = np.zeros(d)
for dd, p in enumerate(user_params):
if p.name.lower() in bounded_distrs:
scale_factors[dd] = (p.upper - p.lower) / 2.0
centers[dd] = (p.upper + p.lower) / 2.0
elif p.name.lower() in unbounded_distrs:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = p.mean
else:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = 0.0
self.param_scaler = scaler_custom(centers, scale_factors)
self.std_sample_points = self.param_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
if not hasattr(self.full_space_poly, 'coefficients'):
raise ValueError('Please call set_model() first on poly.')
self.sample_outputs = self.full_space_poly.get_model_evaluations()
# TODO: use dr_args for resampling of gradient points
as_args = {'grad_points': None}
if dr_args is not None:
as_args.update(dr_args)
self._get_active_subspace(**as_args)
elif self.method == 'variable-projection':
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
if dr_args is not None:
vp_args = {'gamma':0.1, 'beta':1e-4, 'tol':1e-7, 'maxiter':1000, 'U0':None, 'verbose':False}
vp_args.update(dr_args)
self._get_variable_projection(**vp_args)
else:
self._get_variable_projection()
def get_subspace_polynomial(self):
""" Returns a polynomial defined over the dimension reducing subspace.
Returns
-------
Poly
A Poly object that defines a polynomial over the subspace. The distribution of parameters
is assumed to be uniform and the maximum and minimum bounds for each parameter are defined by the maximum
and minimum values of the project samples.
"""
# TODO: Try correlated poly here
active_subspace = self._subspace[:, 0:self.subspace_dimension]
projected_points = np.dot(self.std_sample_points, active_subspace)
myparameters = []
for i in range(0, self.subspace_dimension):
param = Parameter(distribution='uniform', lower=np.min(projected_points[:, i]),
upper=np.max(projected_points[:, i]), order=self.polynomial_degree)
myparameters.append(param)
mybasis = Basis("total-order")
subspacepoly = Poly(myparameters, mybasis, method='least-squares',
sampling_args={'sample-points': projected_points,
'sample-outputs': self.sample_outputs})
subspacepoly.set_model()
return subspacepoly
def get_eigenvalues(self):
""" Returns the eigenvalues of the dimension reducing subspace. Note: this option is
currently only valid for method ``active-subspace``.
Returns
-------
numpy.ndarray
Array of shape (dimensions,) corresponding to the eigenvalues of the above mentioned covariance matrix.
"""
if self.method == 'active-subspace':
return self._eigenvalues
else:
print('Only the active-subspace method yields eigenvalues.')
def get_subspace(self):
""" Returns the dimension reducing subspace.
Returns
-------
numpy.ndarray
Array of shape (dimensions, dimensions) where the first ``subspace_dimension`` columns
contain the dimension reducing subspace, while the remaining columns contain its orthogonal complement.
"""
return self._subspace
def _get_active_subspace(self, grad_points=None, **kwargs):
""" Private method to compute active subspaces. """
if grad_points is None:
X = self.full_space_poly.get_points()
else:
if hasattr(self, 'data_scaler'):
X = self.data_scaler.transform(grad_points)
else:
# Either no standardisation, or user provided poly + param scaling
X = grad_points.copy()
M, d = X.shape
if d != self.sample_points.shape[1]:
raise ValueError('In _get_active_subspace: dimensions of gradient evaluation points mismatched with input dimension!')
alpha = 2.0
num_grad_lb = alpha * self.subspace_dimension * np.log(d)
if M < num_grad_lb:
warnings.warn('Number of gradient evaluation points is likely to be insufficient. Consider resampling!', UserWarning)
polygrad = self.full_space_poly.get_polyfit_grad(X)
if hasattr(self, 'param_scaler'):
# Evaluate gradient in transformed coordinate space
polygrad = self.param_scaler.div[:, np.newaxis] * polygrad
weights = np.ones((M, 1)) / M
R = polygrad.transpose() * weights
C = np.dot(polygrad, R )
# Compute eigendecomposition!
e, W = np.linalg.eigh(C)
idx = e.argsort()[::-1]
eigs = e[idx]
eigVecs = W[:, idx]
if hasattr(self, 'data_scaler'):
scale_factors = 2.0 / (self.data_scaler.Xmax - self.data_scaler.Xmin)
eigVecs = scale_factors[:, np.newaxis] * eigVecs
eigVecs = np.linalg.qr(eigVecs)[0]
self._subspace = eigVecs
self._eigenvalues = eigs
def _get_variable_projection(self, gamma=0.1, beta=1e-4, tol=1e-7, maxiter=1000, U0=None, verbose=False):
""" Private method to obtain an active subspace in inputs design space via variable projection.
Note: It may help to standardize outputs to zero mean and unit variance
Parameters
----------
gamma : float, optional
Step length reduction factor (0,1).
beta : float, optional
Armijo tolerance for backtracking line search (0,1).
tol : float, optional
Tolerance for convergence, measured in the norm of residual over norm of f.
maxiter : int, optional
Maximum number of optimisation iterations.
U0 : numpy.ndarray, optional
Initial guess for active subspace.
verbose : bool, optional
Set to ``True`` for debug messages.
"""
# NOTE: How do we know these are the best values of gamma and beta?
M, m = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
U, _ = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points,U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
# Construct the affine transformation
eta = 2 * np.divide((y - minmax[0,:]), (minmax[1,:]-minmax[0,:])) - 1
# Construct the Vandermonde matrix step 6
V, poly_obj = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V,coeff)
# R = np.linalg.norm(res)
# TODO: convergence criterion??
for iteration in range(0,maxiter):
# Construct the Jacobian step 9
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
# Calculate the gradient of Jacobian (step 10)
G = np.zeros((m, self.subspace_dimension))
# NOTE: Can be vectorised
for i in range(0, M):
G += res[i]*J[i, :, :]
# conduct the SVD for J_vec
vec_J = np.reshape(J, (M, m*self.subspace_dimension))
Y, S, Z = np.linalg.svd(vec_J,full_matrices=False) # step 11
# obtain delta
delta = np.dot(Y[:,:-self.subspace_dimension**2].T, res)
delta = np.dot(np.diag(1/S[:-self.subspace_dimension**2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension**2,:].T, delta).reshape(U.shape)
# carry out Gauss-Newton step
vec_delta=delta.flatten() # step 12
# vectorize G step 13
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
# check alpha step 14
if alpha >= 0:
delta = -G
alpha = -norm_G
# SVD on delta step 17
Y, S, Z = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U,Z.T)
t = 1
for iter2 in range(0,20):
U_new = np.dot(UZ, np.diag(np.cos(S*t))) + np.dot(Y, np.diag(np.sin(S*t)))#step 19
U_new = orth(U_new)
# Update the values with the new U matrix
y = np.dot(self.std_sample_points, U_new)
minmax[0,:] = np.amin(y, axis=0)
minmax[1,:] = np.amax(y, axis=0)
eta = 2 * np.divide((y - minmax[0,:]), (minmax[1,:]-minmax[0,:])) - 1
V_new, poly_obj = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new,coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res)+alpha*beta*t or t < 1e-10: # step 21
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
# coeff = coeff_new
V_plus = V_plus_new
res = res_new
# R = R_new
if dist_change < tol:
if verbose:
print("VP finished with %d iterations" % iteration)
break
if iteration == maxiter - 1 and verbose:
print("VP finished with %d iterations" % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
def get_zonotope_vertices(self, num_samples=10000, max_count=100000):
""" Returns the vertices of the zonotope -- the projection of the high-dimensional space over the computed
subspace.
Parameters
----------
num_samples : int, optional
Number of samples per iteration to check.
max_count : int, optional
Maximum number of iteration.
Returns
-------
numpy.ndarray
Array of shape (number of vertices, ``subspace_dimension``).
Note
----
This routine has been adapted from <NAME>'s zonotope_vertices() function; see reference below.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., (2016) Python Active-Subspaces Utility Library. Journal of Open Source Software, 1(5), 79. `Paper <http://joss.theoj.org/papers/10.21105/joss.00079>`__.
"""
m = self._subspace.shape[0]
n = self.subspace_dimension
W = self._subspace[:, :n]
if n == 1:
y0 = np.dot(W.T, np.sign(W))[0]
            if y0 < 0:
yl, yu = y0, -y0
xl, xu = np.sign(W), -np.sign(W)
else:
yl, yu = -y0, y0
xl, xu = -np.sign(W), np.sign(W)
Y = np.array([yl, yu]).reshape((2,1))
X = np.vstack((xl.reshape((1,m)), xu.reshape((1,m))))
self.Y = Y
return Y
else:
total_vertices = 0
for i in range(n):
total_vertices += comb(m-1,i)
total_vertices = int(2*total_vertices)
Z = np.random.normal(size=(num_samples, n))
X = get_unique_rows(np.sign(np.dot(Z, W.transpose())))
X = get_unique_rows(np.vstack((X, -X)))
N = X.shape[0]
count = 0
while N < total_vertices:
Z = np.random.normal(size=(num_samples, n))
X0 = get_unique_rows(np.sign(np.dot(Z, W.transpose())))
X0 = get_unique_rows(np.vstack((X0, -X0)))
X = get_unique_rows(np.vstack((X, X0)))
N = X.shape[0]
count += 1
if count > max_count:
break
num_vertices = X.shape[0]
if total_vertices > num_vertices:
print('Warning: {} of {} vertices found.'.format(num_vertices, total_vertices))
Y = np.dot(X, W)
self.Y = Y.reshape((num_vertices, n))
return self.Y
def get_linear_inequalities(self):
""" Returns the linear inequalities defining the zonotope vertices, i.e., Ax<=b.
Returns
-------
tuple
Tuple (A,b), containing the numpy.ndarray's A and b; where A is the matrix for setting the linear inequalities,
and b is the right-hand-side vector for setting the linear inequalities.
"""
if self.Y is None:
self.Y = self.get_zonotope_vertices()
n = self.Y.shape[1]
if n == 1:
A = np.array([[1],[-1]])
b = np.array([[max(self.Y)],[min(self.Y)]])
return A, b
else:
convexHull = ConvexHull(self.Y)
A = convexHull.equations[:,:n]
b = -convexHull.equations[:,n]
return A, b
def get_samples_constraining_active_coordinates(self, inactive_samples, active_coordinates):
""" A hit and run type sampling strategy for generating samples at a given coordinate in the active subspace
by varying its coordinates along the inactive subspace.
Parameters
----------
inactive_samples : int
The number of inactive samples required.
        active_coordinates : numpy.ndarray
The active subspace coordinates.
Returns
-------
numpy.ndarray
Array containing the full-space coordinates.
Note
----
This routine has been adapted from <NAME>'s hit_and_run() function; see reference below.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., (2016) Python Active-Subspaces Utility Library. Journal of Open Source Software, 1(5), 79. `Paper <http://joss.theoj.org/papers/10.21105/joss.00079>`__.
"""
y = active_coordinates
N = inactive_samples
W1 = self._subspace[:, :self.subspace_dimension]
W2 = self._subspace[:, self.subspace_dimension:]
m, n = W1.shape
s = np.dot(W1, y).reshape((m, 1))
normW2 = np.sqrt(np.sum(np.power(W2, 2), axis=1)).reshape((m, 1))
A = np.hstack((np.vstack((W2, -W2.copy())), np.vstack((normW2, normW2.copy()))))
b = np.vstack((1 - s, 1 + s)).reshape((2 * m, 1))
c = np.zeros((m - n + 1, 1))
c[-1] = -1.0
zc = linear_program_ineq(c, -A, -b)
z0 = zc[:-1].reshape((m - n, 1))
# define the polytope A >= b
s = np.dot(W1, y).reshape((m, 1))
A = np.vstack((W2, -W2))
b = np.vstack((-1 - s, -1 + s)).reshape((2 * m, 1))
# tolerance
ztol = 1e-6
eps0 = ztol / 4.0
Z = np.zeros((N, m - n))
for i in range(N):
# random direction
bad_dir = True
count, maxcount = 0, 50
while bad_dir:
d = np.random.normal(size=(m - n, 1))
bad_dir = np.any(np.dot(A, z0 + eps0 * d) <= b)
count += 1
if count >= maxcount:
Z[i:, :] = np.tile(z0, (1, N - i)).transpose()
yz = np.vstack([np.repeat(y[:, np.newaxis], N, axis=1), Z.T])
return np.dot(self._subspace, yz).T
# find constraints that impose lower and upper bounds on eps
f, g = b - np.dot(A, z0), np.dot(A, d)
# find an upper bound on the step
            min_ind = np.logical_and(g <= 0, f < -np.sqrt(np.finfo(float).eps))
eps_max = np.amin(f[min_ind] / g[min_ind])
# find a lower bound on the step
            max_ind = np.logical_and(g > 0, f < -np.sqrt(np.finfo(float).eps))
eps_min = np.amax(f[max_ind] / g[max_ind])
# randomly sample eps
eps1 = np.random.uniform(eps_min, eps_max)
# take a step along d
z1 = z0 + eps1 * d
Z[i, :] = z1.reshape((m - n,))
# update temp var
z0 = z1.copy()
yz = np.vstack([np.repeat(y[:, np.newaxis], N, axis=1), Z.T])
return np.dot(self._subspace, yz).T
def plot_sufficient_summary(self, ax=None, X_test=None, y_test=None, show=True, poly=True, uncertainty=False, legend=False, scatter_kwargs={}, plot_kwargs={}):
""" Generates a sufficient summary plot for 1D or 2D polynomial ridge approximations.
See :meth:`~equadratures.plot.plot_sufficient_summary` for full description. """
return plot.plot_sufficient_summary(self, ax, X_test, y_test, show, poly, uncertainty, legend, scatter_kwargs, plot_kwargs)
def plot_2D_contour_zonotope(self, mysubspace, minmax=[- 3.5, 3.5], grid_pts=180, show=True, ax=None):
""" Generates a 2D contour plot of the polynomial ridge approximation.
See :meth:`~equadratures.plot.plot_2D_contour_zonotope` for full description. """
return plot.plot_2D_contour_zonotope(self,minmax,grid_pts,show,ax)
def plot_samples_from_second_subspace_over_first(self, mysubspace_2, axs=None, no_of_samples=500, minmax=[- 3.5, 3.5], grid_pts=180, show=True):
"""
Generates a zonotope plot where samples from the second subspace are projected over the first.
See :meth:`~equadratures.plot.plot_samples_from_second_subspace_over_first` for full description.
"""
return plot.plot_samples_from_second_subspace_over_first(self,mysubspace_2, axs, no_of_samples, minmax, grid_pts, show)
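# End-to-end sketch (synthetic data; added illustration mirroring the
# docstring examples above): recover the ridge direction of f(x) = sin(w.T x).
#   X = np.random.uniform(-1, 1, (500, 5))
#   w = np.ones((5, 1)) / np.sqrt(5)
#   Y = np.sin(X @ w)
#   mysubspace = Subspaces(method='active-subspace', sample_points=X,
#                          sample_outputs=Y, subspace_dimension=1)
#   W1 = mysubspace.get_subspace()[:, :1]   # aligns with +/- w up to noise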
def vandermonde(eta, p):
# TODO: Try using a "correlated" basis here?
_, n = eta.shape
listing = []
for i in range(0, n):
listing.append(p)
Object=Basis('total-order',listing)
# Establish n Parameter objects
params = []
P = Parameter(order=p, lower=-1, upper=1, distribution='uniform')
for i in range(0, n):
params.append(P)
# Use the params list to establish the Poly object
poly_obj = Poly(params, Object, method='least-squares')
V = poly_obj.get_poly(eta)
V = V.T
return V, poly_obj
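# Usage sketch (added illustration): build a total-order Vandermonde-type
# matrix on points already scaled to the [-1, 1] hypercube.
#
#   eta = np.random.uniform(-1, 1, size=(50, 2))   # 50 points in 2 dims
#   V, poly_obj = vandermonde(eta, p=3)            # degree-3 total order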
def vector_AS(list_of_polys, R=None, alpha=None, k=None, samples=None,
              bootstrap=False, bs_trials=50, J=None, save_path=None):
    # Find active-subspace directions for a vector-valued function
    # (analogous to computeActiveSubspace). Since we are dealing with *one*
    # vector-valued function, there is a single input space, so take the
    # first of the polys.
poly = list_of_polys[0]
if samples is None:
d = poly.dimensions
if alpha is None:
alpha = 4
if k is None or k > d:
k = d
M = int(alpha * k * np.log(d))
X = np.zeros((M, d))
for j in range(0, d):
X[:, j] = np.reshape(poly.parameters[j].getSamples(M), M)
else:
X = samples
M, d = X.shape
n = len(list_of_polys) # number of outputs
if R is None:
R = np.eye(n)
elif len(R.shape) == 1:
R = np.diag(R)
if J is None:
J = jacobian_vec(list_of_polys,X)
if not(save_path is None):
np.save(save_path,J)
J_new = np.matmul(sqrtm(R), np.transpose(J,[2,0,1]))
JtJ = np.matmul(np.transpose(J_new,[0,2,1]), J_new)
H = np.mean(JtJ,axis=0)
# Compute P_r by solving generalized eigenvalue problem...
# Assume sigma = identity for now
e, W = np.linalg.eigh(H)
eigs = np.flipud(e)
eigVecs = np.fliplr(W)
if bootstrap:
all_bs_eigs = np.zeros((bs_trials, d))
all_bs_W = []
for t in range(bs_trials):
print("Starting bootstrap trial %d"%t)
bs_samples = X[np.random.randint(0,M,size=M), :]
J_bs = jacobian_vec(list_of_polys, bs_samples)
J_new_bs = np.matmul(sqrtm(R), np.transpose(J_bs,[2,0,1]))
JtJ_bs = np.matmul(np.transpose(J_new_bs, [0, 2, 1]), J_new_bs)
H_bs = np.mean(JtJ_bs, axis=0)
# Compute P_r by solving generalized eigenvalue problem...
# Assume sigma = identity for now
e_bs, W_bs = np.linalg.eigh(H_bs)
all_bs_eigs[t,:] = np.flipud(e_bs)
eigVecs_bs = np.fliplr(W_bs)
all_bs_W.append(eigVecs_bs)
eigs_bs_lower = np.min(all_bs_eigs, axis = 0)
eigs_bs_upper = np.max(all_bs_eigs, axis = 0)
return eigs,eigVecs,eigs_bs_lower,eigs_bs_upper, all_bs_W
else:
return eigs,eigVecs
def jacobian_vp(V, V_plus, U, f, Polybasis, eta, minmax, X):
M, N = V.shape
m, n = U.shape
Gradient = Polybasis.get_poly_grad(eta)
sub = (minmax[1,:]-minmax[0,:]).T
vectord = np.reshape(2.0/sub,(n,1))
# Initialize the tensor
J = np.zeros((M, m, n))
# Obtain the derivative of this tensor
dV = np.zeros((m, n, M, N))
for l in range(0, n):
for j in range(0, N):
current = Gradient[l].T
if n == 1:
current = Gradient.T
            dV[:, l, :, j] = vectord[l].item() * (X.T * current[:, j])
# Get the P matrix
P = np.identity(M)-np.matmul(V,V_plus)
V_minus = scipy.linalg.pinv(V)
# Calculate entries for the tensor
for j in range(0,m):
for k in range(0,n):
temp1 = np.linalg.multi_dot([P,dV[j,k,:,:],V_minus])
J[:, j, k]=(-np.matmul((temp1+temp1.T),f)).reshape((M,)) # Eqn 15
return J
def jacobian_vec(list_of_poly, X):
m = len(list_of_poly)
[N, d] = X.shape
J = np.zeros((m, d, N))
for p in range(len(list_of_poly)):
J[p, :, :] = list_of_poly[p].get_polyfit_grad(X)
return J
def subspace_dist(U, V):
if len(U.shape) == 1:
return np.linalg.norm(np.outer(U, U) - np.outer(V, V), ord=2)
else:
return np.linalg.norm(np.dot(U, U.T) - np.dot(V, V.T), ord=2)
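def _subspace_dist_demo():
    # Added illustration (not part of the original module): subspace_dist()
    # compares projectors, so any orthogonal change of basis within the same
    # column span gives a distance of zero.
    U, _ = np.linalg.qr(np.random.randn(5, 2))
    theta = np.pi / 7.0
    R = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    V = U @ R  # same span, rotated basis
    assert np.isclose(subspace_dist(U, U), 0.0)
    assert np.isclose(subspace_dist(U, V), 0.0)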
def linear_program_ineq(c, A, b):
c = c.reshape((c.size,))
b = b.reshape((b.size,))
# make unbounded bounds
bounds = []
for i in range(c.size):
bounds.append((None, None))
A_ub, b_ub = -A, -b
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, options={"disp": False}, method='simplex')
if res.success:
return res.x.reshape((c.size, 1))
else:
np.savez('bad_scipy_lp_ineq_{:010d}'.format(np.random.randint(int(1e9))),
c=c, A=A, b=b, res=res)
raise Exception('Scipy did not solve the LP. Blame Scipy.')
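def _linear_program_ineq_demo():
    # Added illustration (not part of the original module): minimise c^T x
    # subject to A x >= b. With c = (1, 1) and the constraints x_i >= 1, the
    # optimum sits at the vertex (1, 1).
    c = np.ones((2, 1))
    A = np.eye(2)
    b = np.ones((2, 1))
    x = linear_program_ineq(c, A, b)
    assert np.allclose(x.flatten(), [1.0, 1.0])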
def get_unique_rows(X0):
X1 = X0.view(np.dtype((np.void, X0.dtype.itemsize * X0.shape[1])))
return np.unique(X1).view(X0.dtype).reshape(-1, X0.shape[1])
def _null_space(A, rcond=None):
"""
null space method adapted from scipy.
"""
u, s, vh = scipy.linalg.svd(A, full_matrices=True)
M, N = u.shape[0], vh.shape[1]
if rcond is None:
rcond = np.finfo(s.dtype).eps * max(M, N)
    tol = np.amax(s) * rcond
    num = np.sum(s > tol, dtype=int)
    Q = vh[num:, :].T.conj()
    return Q
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import unittest
import datetime
import warnings
import jsonpickle
from jsonpickle.compat import PY2, PY3, PY_MINOR
from helper import SkippableTest
try:
import numpy as np
import numpy.testing as npt
from numpy.compat import asbytes
from numpy.testing import assert_equal
except ImportError:
np = None
class NumpyTestCase(SkippableTest):
def setUp(self):
if np is None:
self.should_skip = True
return
self.should_skip = False
import jsonpickle.ext.numpy
jsonpickle.ext.numpy.register_handlers()
def tearDown(self):
if self.should_skip:
return
import jsonpickle.ext.numpy
jsonpickle.ext.numpy.unregister_handlers()
def roundtrip(self, obj):
return jsonpickle.decode(jsonpickle.encode(obj))
def test_dtype_roundtrip(self):
if self.should_skip:
return self.skip('numpy is not importable')
dtypes = [
np.int,
np.float,
np.complex,
np.int32,
np.str,
np.object,
np.unicode,
np.dtype('f4,i4,f2,i1'),
np.dtype(('f4', 'i4'), ('f2', 'i1')),
np.dtype('1i4', align=True),
np.dtype('M8[7D]'),
np.dtype({'names': ['f0', 'f1', 'f2'],
'formats': ['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
]
if not PY2:
dtypes.extend([
np.dtype([('f0', 'i4'), ('f2', 'i1')]),
np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])]),
])
for dtype in dtypes:
self.assertEqual(self.roundtrip(dtype), dtype)
def test_generic_roundtrip(self):
if self.should_skip:
return self.skip('numpy is not importable')
values = [
np.int_(1),
np.int32(-2),
np.float_(2.5),
np.nan,
-np.inf,
np.inf,
np.datetime64('2014-01-01'),
np.str_('foo'),
np.unicode_('bar'),
np.object_({'a': 'b'}),
np.complex_(1 - 2j)
]
for value in values:
decoded = self.roundtrip(value)
assert_equal(decoded, value)
self.assertTrue(isinstance(decoded, type(value)))
def test_ndarray_roundtrip(self):
if self.should_skip:
return self.skip('numpy is not importable')
arrays = [
np.random.random((10, 20)),
np.array([[True, False, True]]),
np.array(['foo', 'bar']),
np.array(['baz'.encode('utf-8')]),
np.array(['2010', 'NaT', '2030']).astype('M'),
np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3),
np.rec.array([(1, 11, 'a'), (2, 22, 'b'),
(3, 33, 'c'), (4, 44, 'd'),
(5, 55, 'ex'), (6, 66, 'f'),
(7, 77, 'g')],
formats='u1,f4,a1'),
np.array(['1960-03-12', datetime.date(1960, 3, 12)],
dtype='M8[D]'),
np.array([0, 1, -1, np.inf, -np.inf, np.nan], dtype='f2'),
]
if not PY2:
arrays.extend([
np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', 'f4')])
])
for array in arrays:
decoded = self.roundtrip(array)
assert_equal(decoded, array)
self.assertEqual(decoded.dtype, array.dtype)
def test_shape(self):
"""test that shapes containing zeros, which cannot be represented as nested lists, are deserialized correctly"""
a = np.eye(3)[3:]
_a = self.roundtrip(a)
npt.assert_array_equal(a, _a)
def test_accuracy(self):
"""test if the string representation maintains accuracy"""
rand = np.random.randn(3, 3)
_rand = self.roundtrip(rand)
npt.assert_array_equal(rand, _rand)
def test_b64(self):
"""test that binary encoding works"""
a = np.random.rand(10, 10) # array of substantial size is stored as b64
_a = self.roundtrip(a)
npt.assert_array_equal(a, _a)
def test_views(self):
"""Test that views are maintained under serialization"""
rng = np.arange(20) # a range of an array
view = rng[10:] # a view referencing a portion of an array
data = [rng, view]
_data = self.roundtrip(data)
_data[0][15] = -1
self.assertEqual(_data[1][5], -1)
def test_strides(self):
"""test that cases with non-standard strides and offsets work correctly"""
arr = np.eye(3)
view = arr[1:, 1:]
self.assertTrue(view.base is arr)
data = [arr, view]
_data = self.roundtrip(data)
# test that the deserialized arrays indeed view the same memory
_arr, _view = _data
_arr[1, 2] = -1
self.assertEqual(_view[0, 1], -1)
self.assertTrue(_view.base is _arr)
def test_weird_arrays(self):
"""test that we disallow serialization of references to arrays that do not effectively own their memory"""
a = np.arange(9)
b = a[5:]
a.strides = 1
# this is kinda fishy; a has overlapping memory, _a does not
if PY2 or (PY3 and PY_MINOR <= 3):
warn_count = 0
else:
warn_count = 1
with warnings.catch_warnings(record=True) as w:
_a = self.roundtrip(a)
self.assertEqual(len(w), warn_count)
        npt.assert_array_equal(a, _a)
import argparse
import os
import time
from multiprocessing import Event, Manager, Process, Queue
from operator import itemgetter
import geopandas as gpd
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from gerrychain import Graph, MarkovChain, Partition
from gerrychain.accept import always_accept
from gerrychain.constraints import (
LowerBound,
UpperBound,
Validator,
WithinPercentRangeOfBounds,
)
from gerrychain.proposals import propose_chunk_flip, propose_random_flip
from gerrychain.random import random # FOR REPRODUCIBILITY
from gerrychain.updaters import Tally
from tqdm import trange
from twilio.rest import Client
def load_data(city, state, st_FIPS):
"""
Loads census data and HOLC representations from disk.
Parameters
----------
city : string
example: "Atlanta"
state : string
example: "GA"
st_FIPS : string
example: "130"
Returns
-------
race_matrix : geopandas.GeoDataFrame
rows are relevant, non-empty tracts, columns are racial groups.
"""
# Load race data
FILEPATH = "./data/block_2010_data/nhgis0005_csv"
relevant_cols = [
"GISJOIN",
"STATEA",
"COUNTYA",
"H7X001",
"H7X002",
"H7X003",
"H7X004",
"H7X005",
"H7X006",
"H7X007",
"H7X008",
]
race_raw = pd.read_csv(
f"{FILEPATH}/nhgis0005_ds172_2010_block.csv",
usecols=relevant_cols,
dtype={"GISJOIN": str, "STATEA": str, "COUNTYA": str},
)
column_mapper = dict(
zip(
relevant_cols,
[
"GISJOIN",
"state_fips",
"county_fips",
"total",
"white",
"black",
"american_indian_al_native",
"asian",
"hawaiian_pac_islander",
"other",
"two_plus",
],
)
)
race_raw.rename(columns=column_mapper, inplace=True)
print("Race data loaded.")
race_raw.set_index("GISJOIN", inplace=True)
# Load relevant shapefile and crosswalks
city_blocks = gpd.read_file(
f"./data/block_2010_data/nhgis0005_shape/nhgis0005_shapefile_tl2010_{st_FIPS}_block_2010/{state}_block_2010.shp"
).set_index("GEOID10")
city_rl_cw = pd.read_csv(
f"./data/outputs/{city}_blocks_2010_crosswalk.csv", dtype={"block_id_2010": str}
).set_index("block_id_2010")
city_blocks = (
city_blocks.join(city_rl_cw, how="outer").dropna().set_index("GISJOIN")
)
print("City tract data loaded.")
# join shapefile with race data
city = city_blocks.join(race_raw, how="outer").dropna()
# filter to create R
R = (
city.groupby("holc_id_uq")
.sum()
.filter(
[
"total",
"white",
"black",
"american_indian_al_native",
"asian",
"hawaiian_pac_islander",
"hawaiian_pac_islander",
"other",
"two_plus",
]
)
)
# find empty districts, if any exist
empty_districts = np.array(R.loc[(R.total == 0)].index)
# build race matrix
race_matrix = city.filter(
[
"total",
"white",
"black",
"american_indian_al_native",
"asian",
"hawaiian_pac_islander",
"other",
"two_plus",
"holc_id_uq",
"geometry",
]
)
race_matrix.rename(columns={"holc_id_uq": "partition"}, inplace=True)
# remove districts with population 0
race_matrix = race_matrix[~race_matrix["partition"].isin(empty_districts)]
return race_matrix
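# Example call matching the docstring above (the referenced data files must
# exist locally for this to run):
#   race_matrix = load_data("Atlanta", "GA", "130")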
# entropy implementation
def city_entropy(R, P):
"""
Computes entropy of a city-region (see White, 1986).
Parameters
----------
R : numpy.ndarray
i-by-j matrix, where i=districts and j=ethnicities.
P : numpy.array
i-length vector of the total population in a city-region.
Returns
-------
int
citywide segregation entropy score.
"""
# define key terms in algorithm
N = sum(P)
    R_prop = np.apply_along_axis(lambda column: column / P, 0, R)
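# The source is truncated above, so the remainder of city_entropy() is not
# shown. Below is a hedged, self-contained sketch of the citywide entropy in
# the spirit of White (1986): per-district entropy of the racial proportions,
# aggregated as a population-weighted average. It is an added illustration,
# not the original implementation.
def _city_entropy_sketch(R, P):
    N = np.sum(P)
    R_prop = R / P[:, None]  # district-level racial proportions
    with np.errstate(divide="ignore", invalid="ignore"):
        logs = np.where(R_prop > 0, np.log(1.0 / R_prop), 0.0)
    H_district = np.sum(R_prop * logs, axis=1)  # entropy of each district
    return float(np.sum((P / N) * H_district))  # population-weighted average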
import logging
logging.basicConfig(filename='hrmonitorlog.txt', format='%(levelname)s \
%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
def beats(filename):
"""module that detects the number of heartbeats in an ECG signal
:param filename: the file name of the ECG input
:returns beats: numpy array of times where a heart beat occured
"""
import numpy as np
import scipy.signal as signal
from signal_processing import signal_processing
from extract_data import extract_time_data
logging.info("beats: everything imported")
time = extract_time_data(filename)
corr = signal_processing(filename)
    peaks = signal.find_peaks_cwt(corr, np.arange(1, 300))
    # map the detected peak indices back to their times, matching the
    # docstring's contract (completion of the truncated source)
    beats = np.array(time)[peaks]
    return beats
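# Example (hypothetical file name; any ECG record accepted by
# extract_time_data and signal_processing will do):
#   beat_times = beats('test_data/test1.csv')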
"""
API for kernel functions:
`function(W, *args, zn=True)`
W: Window size (in)
*args: Additional positional arguments to the function as parameters (must be cast-able to float)
zn: Boolean that indicates if the kernel function integrates to zero (default is True).
"""
import numpy as np
from discrete_shocklets.utils import zero_norm
registered_kernel_functions = []
def register_kernel_function(kernel_function):
registered_kernel_functions.append(kernel_function)
return kernel_function
@register_kernel_function
def haar(L, zn=True):
    # step kernel: -1 on the first half of the window, +1 on the second
    # (reconstruction of the truncated source; assumes the standard Haar form)
    res = -1 * np.ones(L)
    res[L // 2:] = 1
    if zn:
        res = zero_norm(res)
    return res
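# An additional example kernel following the API documented at the top of the
# module. This is an added illustration, not part of the original library:
# a symmetric triangular bump over the window.
@register_kernel_function
def triangle(W, zn=True):
    half = W // 2
    # rise linearly to the window centre, then fall back to zero
    res = np.concatenate([np.arange(half, dtype=float),
                          np.arange(W - half - 1, -1, -1, dtype=float)])
    if zn:
        res = zero_norm(res)
    return res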
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import *
import numpy as nmp
import matplotlib
import matplotlib.pyplot as plt
import scipy.linalg
CODE_GEN = 1
COMPILE = 1
FORMULATION = 2 # 0 for hexagon 2 SCQP sphere
i_d_ref = 1.484
i_q_ref = 1.429
w_val = 200
i_d_ref = -20
i_q_ref = 20
w_val = 300
udc = 580
u_max = 2/3*udc
x0 = nmp.array([0.0, 0.0])
# fitted psi_d map
def psi_d_num(x,y):
# This function was generated by the Symbolic Math Toolbox version 8.0.
# 07-Feb-2018 23:07:49
psi_d_expression = x*(-4.215858085639979e-3) + \
exp(y**2*(-8.413493151721978e-5))* \
atan(x*1.416834085282644e-1)*8.834738694115108e-1
return psi_d_expression
def psi_q_num(x,y):
# This function was generated by the Symbolic Math Toolbox version 8.0.
# 07-Feb-2018 23:07:50
psi_q_expression = y*1.04488335702649e-2+ \
exp(x**2*(-1.0/7.2e1))*atan(y)*6.649036351062812e-2
return psi_q_expression
psi_d_ref = psi_d_num(i_d_ref, i_q_ref)
psi_q_ref = psi_q_num(i_d_ref, i_q_ref)
# compute steady-state u
Rs = 0.4
u_d_ref = Rs*i_d_ref - w_val*psi_q_ref
u_q_ref = Rs*i_q_ref + w_val*psi_d_ref
def export_rsm_model():
model_name = 'rsm'
# constants
theta = 0.0352
Rs = 0.4
m_load = 0.0
J = nmp.array([[0, -1], [1, 0]])
# set up states
psi_d = SX.sym('psi_d')
psi_q = SX.sym('psi_q')
x = vertcat(psi_d, psi_q)
# set up controls
u_d = SX.sym('u_d')
u_q = SX.sym('u_q')
u = vertcat(u_d, u_q)
# set up algebraic variables
i_d = SX.sym('i_d')
i_q = SX.sym('i_q')
z = vertcat(i_d, i_q)
# set up xdot
psi_d_dot = SX.sym('psi_d_dot')
psi_q_dot = SX.sym('psi_q_dot')
xdot = vertcat(psi_d_dot, psi_q_dot)
# set up parameters
w = SX.sym('w') # speed
dist_d = SX.sym('dist_d') # d disturbance
dist_q = SX.sym('dist_q') # q disturbance
p = vertcat(w, dist_d, dist_q)
# build flux expression
Psi = vertcat(psi_d_num(i_d, i_q), psi_q_num(i_d, i_q))
# dynamics
f_impl = vertcat( psi_d_dot - u_d + Rs*i_d - w*psi_q - dist_d, \
psi_q_dot - u_q + Rs*i_q + w*psi_d - dist_q, \
psi_d - Psi[0], \
psi_q - Psi[1])
model = AcadosModel()
model.f_impl_expr = f_impl
model.f_expl_expr = []
model.x = x
model.xdot = xdot
model.u = u
model.z = z
model.p = p
model.name = model_name
# BGP constraint
r = SX.sym('r', 2, 1)
model.con_phi_expr = r[0]**2 + r[1]**2
model.con_r_expr = vertcat(u_d, u_q)
model.con_r_in_phi = r
return model
def get_general_constraints_DC(u_max):
# polytopic constraint on the input
r = u_max
x1 = r
y1 = 0
x2 = r*cos(pi/3)
y2 = r*sin(pi/3)
q1 = -(y2 - y1/x1*x2)/(1-x2/x1)
m1 = -(y1 + q1)/x1
# q1 <= uq + m1*ud <= -q1
# q1 <= uq - m1*ud <= -q1
# box constraints
m2 = 0
q2 = r*sin(pi/3)
# -q2 <= uq <= q2
# form D and C matrices
# (acados C interface works with column major format)
D = nmp.transpose(nmp.array([[1, m1],[1, -m1]]))
D = nmp.array([[m1, 1],[-m1, 1]])
C = nmp.transpose(nmp.array([[0, 0], [0, 0]]))
ug = nmp.array([-q1, -q1])
lg = nmp.array([+q1, +q1])
lbu = nmp.array([-q2])
ubu = nmp.array([+q2])
res = dict()
res["D"] = D
res["C"] = C
res["lg"] = lg
res["ug"] = ug
res["lbu"] = lbu
res["ubu"] = ubu
return res
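# Illustrative use of the returned dictionary (the OCP constraint field names
# are assumed from the acados Python interface; verify against your version):
#   cstr = get_general_constraints_DC(u_max)
#   # ocp.constraints.D, ocp.constraints.C = cstr["D"], cstr["C"]
#   # ocp.constraints.lg, ocp.constraints.ug = cstr["lg"], cstr["ug"]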
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# export model
model = export_rsm_model()
ocp.model = model
if FORMULATION == 2:
# constraints name
ocp.constraints.constr_type = 'BGP'
# Ts = 0.0016
# Ts = 0.0012
Ts = 0.0008
# Ts = 0.0004
nx = model.x.size()[0]
nu = model.u.size()[0]
nz = model.z.size()[0]
np = model.p.size()[0]
ny = nu + nx
ny_e = nx
N = 2
Tf = N*Ts
# set number of shooting intervals
ocp.dims.N = N
# set cost module
Q = nmp.eye(nx)
Q[0,0] = 5e2
Q[1,1] = 5e2
R = nmp.eye(nu)
R[0,0] = 1e-4
R[1,1] = 1e-4
ocp.cost.W = scipy.linalg.block_diag(Q, R)
Vx = nmp.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
ocp.cost.Vx = Vx
Vu = nmp.zeros((ny, nu))
Vu[2,0] = 1.0
Vu[3,1] = 1.0
ocp.cost.Vu = Vu
Vz = nmp.zeros((ny, nz))
ocp.cost.Vz = Vz
import tensorflow as tf
import numpy as np
class ConvNetMixin(object):
def _conv_layer_with_relu(self, bottom, filter_size, filter_num, scope_name, bottom_channel=None, padding='SAME'):
out = self._conv_layer(bottom, filter_size, filter_num, scope_name, bottom_channel, padding)
with tf.variable_scope(scope_name):
relu = tf.nn.relu(out, name=scope_name)
return relu
def _conv_layer(self, bottom, filter_size, filter_num, scope_name, bottom_channel=None, padding='SAME'):
if not bottom_channel:
_, _, _, bottom_channel = bottom.get_shape().as_list()
with tf.variable_scope(scope_name):
kernel = tf.Variable(
tf.truncated_normal([*filter_size, bottom_channel, filter_num], dtype=tf.float32, stddev=1e-1),
trainable=False,
name='weights'
)
conv = tf.nn.conv2d(bottom, kernel, [1, 1, 1, 1], padding=padding)
biases = tf.Variable(
tf.constant(0.0, shape=[filter_num], dtype=tf.float32),
trainable=True,
name='bias'
)
out = tf.nn.bias_add(conv, biases)
return out
def _max_pool(self, bottom, name):
return tf.nn.max_pool(
bottom,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name=name
)
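# Usage sketch (added illustration): a network class mixes these helpers in
# and chains them, e.g.
#   class SimpleBackbone(ConvNetMixin):
#       def build(self, images):
#           conv1 = self._conv_layer_with_relu(images, (3, 3), 64, 'conv1_1')
#           return self._max_pool(conv1, 'pool1')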
class AnchorTargetMixin(object):
def __init__(self, debug):
self._anchors = self.generate_anchors(scales=(8, 16, 32))
self._debug = debug
self._feat_stride = 16
def generate_anchors(self, scales):
base_size = 16
ratios = np.array([0.5, 1, 2])
base_anchor = np.array([1, 1, base_size, base_size]) - 1
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
w_ratios = np.round(np.sqrt(w * h / ratios))
        h_ratios = np.round(np.sqrt(ratios * w * h))
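        # The source is truncated here; _whctrs() and the rest of the anchor
        # construction are not shown. For reference, the standard Faster
        # R-CNN helper (a hedged reconstruction, not this file's code) is:
        #
        #   def _whctrs(self, anchor):
        #       w = anchor[2] - anchor[0] + 1
        #       h = anchor[3] - anchor[1] + 1
        #       x_ctr = anchor[0] + 0.5 * (w - 1)
        #       y_ctr = anchor[1] + 0.5 * (h - 1)
        #       return w, h, x_ctr, y_ctr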
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>, CWI, Amsterdam
<EMAIL>
TODO
fhe full
data of one of the data sets described in
"A Cone-Beam X-Ray CT Data Collection Designed for Machine Learning" by
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
"""
import numpy as np
import astra
import os
import imageio
import time
import matplotlib.pyplot as plt
import nesterov_gradient
from scipy.interpolate import RegularGridInterpolator as rgi
def rotate_astra_vec_geom(vecs, theta):
s = np.asmatrix(vecs[:,0:3])
d = np.asmatrix(vecs[:,3:6])
u = np.asmatrix(vecs[:,6:9])
v = np.asmatrix(vecs[:,9:12])
du = d + u
dv = d + v
rot_mat = np.matrix([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
s = s * rot_mat.transpose()
d = d * rot_mat.transpose()
du = du * rot_mat.transpose()
dv = dv * rot_mat.transpose()
u = du - d
v = dv - d
vecs = np.concatenate((np.asarray(s), np.asarray(d), np.asarray(u), np.asarray(v)), axis=1)
return vecs
#### user defined settings ####################################################
# select the ID of the sample you want to reconstruct
walnut_id = 1
# define a sub-sampling factor in angular direction
# (all reference reconstructions are computed with full angular resolution)
angular_sub_sampling = 1
# select of voxels per mm in one direction (higher = larger res)
# volume size in one direction will be 50 * voxel_per_mm + 1
voxel_per_mm = 10
# the number of slices to be extracted will be number of voxels in one direction
# times this factor
radial_slice_fac = np.sqrt(2)
# to avoid artefacts from the radial slicing, we compute multiple iterative
# reconstructions with rotated geometries and only extract the radial slices that are close
# t0 0 and 90 degrees. n_div is the number of reconstruction
n_div = 24
# we enter here some intrinsic details of the dataset needed for our reconstruction scripts
# set the variable "data_path" to the path where the dataset is stored on your own workstation
data_path = '/bigstore/felix/Walnuts/'
# set the variable "recon_path" to the path where you would like to store the
# reconstructions you compute
rad_slice_path = '/bigstore/felix/WalnutsRadialSlices/'
# set index of gpu to use
gpu_index = 3
astra.astra.set_gpu_index(gpu_index)
print('computing Walnut', walnut_id, ',on GPU', gpu_index, flush=True)
#### general settings #########################################################
# projection index
# there are in fact 1201, but the last and first one come from the same angle
projs_idx = range(0, 1200, angular_sub_sampling)
nb_projs_orbit = len(projs_idx)
projs_name = 'scan_{:06}.tif'
dark_name = 'di000000.tif'
flat_name = ['io000000.tif', 'io000001.tif']
vecs_name = 'scan_geom_corrected.geom'
projs_rows = 972
projs_cols = 768
# transformation to apply to each image, we need to get the image from
# the way the scanner reads it out into to way described in the projection
# geometry
trafo = lambda image : np.transpose(np.flipud(image))
# size of the reconstruction volume in voxels
n_x = 50 * voxel_per_mm + 1
# size of a cubic voxel in mm
vox_sz = 1/voxel_per_mm
# number of radial slices to be extracted
n_rad = int(np.round(n_x * radial_slice_fac))
import argparse
import cv2
import mediapipe as mp
import numpy as np
import csv
from custom.iris_lm_depth import from_landmarks_to_depth
from videosource import FileSource, WebcamSource
mp_face_mesh = mp.solutions.face_mesh
left_eye_landmarks_id = np.array([33, 133])
right_eye_landmarks_id = np.array([362, 263])
dist_coeff = np.zeros((4, 1))
YELLOW = (0, 255, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
RED = (0, 0, 255)
SMALL_CIRCLE_SIZE = 1
LARGE_CIRCLE_SIZE = 2
LEFT_EYE_LANDMARKS_ID = np.array([33, 133])
#!/usr/bin/env python
# Test QP solver against Maros Mezaros Benchmark suite
from __future__ import print_function
import sys
import scipy.io as spio
import scipy.sparse as spspa
import scipy as sp
import numpy as np
import ipdb
import mathprogbasepy as mpbpy
import mathprogbasepy.quadprog.problem as mpbpy_prob
def load_maros_meszaros_problem(f):
# Load file
m = spio.loadmat(f)
# Convert matrices
P = m['Q'].astype(float)
n = P.shape[0]
q = m['c'].T.flatten().astype(float)
A = m['A'].astype(float)
A = spspa.vstack([A, spspa.eye(n)])
u = np.append(m['ru'].T.flatten().astype(float),
m['ub'].T.flatten().astype(float))
l = np.append(m['rl'].T.flatten().astype(float),
m['lb'].T.flatten().astype(float))
# Define problem
p = mpbpy.QuadprogProblem(P, q, A, l, u)
return p
def main():
sp.random.seed(1)
# Possible ops: {'small1', 'small2', 'random',
# 'primal_infeasible', 'random_primal_infeasible',
# 'maros_meszaros', 'lp', 'dual_infeasible_lp',
# 'dual_infeasible_qp'}
example = 'random_primal_infeasible'
if example == 'maros_meszaros':
# Maros Meszaros Examples
# f = 'tests/maros_meszaros/CVXQP2_S.mat'
# f = 'tests/maros_meszaros/CVXQP1_S.mat'
# f = 'tests/maros_meszaros/AUG2D.mat'
f = 'maros_meszaros/CONT-200.mat'
# f = 'tests/maros_meszaros/PRIMAL3.mat'
# f = 'tests/maros_meszaros/QBANDM.mat'
p = load_maros_meszaros_problem(f)
elif example == 'small1':
# Our Examples
# Small Example 1
P = spspa.csc_matrix(np.array([[4., 1.], [1., 2.]]))
q = np.ones(2)
A = spspa.vstack([spspa.csc_matrix(np.ones((1, 2))),
spspa.eye(P.shape[0])]).tocsc()
l = np.array([1.0, 0.0, 0.0])
u = np.array([1.0, 0.7, 0.7])
p = mpbpy.QuadprogProblem(P, q, A, l, u)
elif example == 'small2':
# Small Example 2
P = spspa.csc_matrix(np.array([[11., 0.], [0., 0.]]))
q = np.array([3, 4])
A = spspa.csc_matrix(np.array([[-1, 0], [0, -1], [-1, -3],
[2, 5], [3, 4]]))
u = np.array([0., 0., -15, 100, 80])
l = -np.inf * np.ones(len(u))
p = mpbpy.QuadprogProblem(P, q, A, l, u)
elif example == 'primal_infeasible':
# primal_infeasible example
# P = spspa.eye(2)
P = spspa.csc_matrix((2, 2))
q = np.ones(2)
A = spspa.csc_matrix(np.array([[1, 0], [0, 1], [1, 1]]))
l = np.array([0., 0., -1.])
u = np.array([np.inf, np.inf, -1.])
p = mpbpy.QuadprogProblem(P, q, A, l, u)
elif example == 'random_primal_infeasible':
# Random Example
n = 50
m = 500
# Generate random Matrices
Pt = sp.randn(n, n)
P = spspa.csc_matrix(np.dot(Pt.T, Pt))
q = sp.randn(n)
A = spspa.csc_matrix(sp.randn(m, n))
u = 3 + sp.randn(m)
# l = u
l = -3 + sp.randn(m)
# Make random problem primal_infeasible
A[int(n/2), :] = A[int(n/2)+1, :]
l[int(n/2)] = u[int(n/2)+1] + 100 * sp.rand()
u[int(n/2)] = l[int(n/2)] + 0.5
# l[int(n/3)] = u[int(n/3)] + 100 * sp.rand()
# l[int(n/4)] = u[int(n/4)] + 50. * sp.rand()
p = mpbpy.QuadprogProblem(P, q, A, l, u)
elif example == 'dual_infeasible_lp':
# Dual infeasible example
P = spspa.csc_matrix((2, 2))
q = np.array([2, -1])
A = spspa.eye(2)
l = np.array([0., 0.])
u = np.array([np.inf, np.inf])
p = mpbpy.QuadprogProblem(P, q, A, l, u)
elif example == 'dual_infeasible_qp':
# Dual infeasible example
P = spspa.csc_matrix(np.diag(np.array([4., 0.])))
q = np.array([0, 2])
A = spspa.csc_matrix([[1., 1.], [-1., 1.]])
l = np.array([-np.inf, -np.inf])
u = np.array([2., 3.])
p = mpbpy.QuadprogProblem(P, q, A, l, u)
elif example == 'random':
# Random Example
n = 30
m = 50
# Generate random Matrices
Pt = sp.randn(n, n)
        P = spspa.csc_matrix(np.dot(Pt.T, Pt))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.linalg import sqrtm
from collections import OrderedDict
from GLM.GLM_Model.PoissonVariational import PoissonVariational
from GLM.GLM_Model.PoissonMAP import PoissonMAP
from GLM.GLM_Model import GLM_Model_GP, GLM_Model_MAP, GP_Covariate, MAP_Covariate
from Utils import utils
class Model_Runner:
def __init__(self, params):
self.params = params
self.poisson_model = None
self.variational_model = None
self.map_model = None
self.ml_model = None
self.data_df = None
self.hist_data = None
self.stim_data = None
def initialize_design_matrices_demo(self, data_df):
self.data_df = data_df
self.covariate_data = OrderedDict()
if 'History' not in self.data_df.index:
raise KeyError('"History" needs to be a data field')
else:
self.spike_data = self.data_df.loc['History', 'data']
for covariate in self.data_df.index:
self.covariate_data[covariate] = self.data_df.loc[covariate, 'data']
        self.params.num_test_trials = np.floor(self.spike_data.shape[0] * 1e-2 * self.params.percent_test_trials).astype(int)
def create_variational_covariates_demo(self):
self.kernel_prep_dict = {'chol': ['Kuu'], 'inv': ['Kuu']}
self.glm_gp = GLM_Model_GP.GLM_Model_GP(self.params)
self.glm_gp.add_spike_history(self.spike_data)
def add_covariate(self, covariate):
self.glm_gp.add_covariate(covariate)
def train_demo(self):
self.variational_model = PoissonVariational(self.params, self.data_df, self.glm_gp, self.kernel_prep_dict)
self.variational_model.initialize_variational_model()
self.variational_model.train_variational_parameters()
def initialize_design_matrices(self):
self.data_df = pd.read_pickle(self.params.expt_problem_data_path)
self.hist_data = self.data_df.loc['History', 'data']
self.spike_data = self.data_df.loc['History', 'data']
self.stim1_data = self.data_df.loc['Stim1', 'data']
self.stim2_data = self.data_df.loc['Stim2', 'data']
self.stim3_data = self.data_df.loc['Stim3', 'data']
self.hist_data_b = self.data_df.loc['Coupling_b', 'data']
self.hist_data_c = self.data_df.loc['Coupling_c', 'data']
        self.params.num_test_trials = np.floor(self.hist_data.shape[0] * 1e-2 * self.params.percent_test_trials).astype(int)
def create_variational_covariates(self):
kernel_prep_dict = {'chol': ['Kuu'], 'inv': ['Kuu']}
# create glm object
glm_gp = GLM_Model_GP.GLM_Model_GP(self.params)
glm_gp.add_spike_history(self.spike_data)
# history filter parameters
hist_etc_params = {'use_exp_mean': True,
'use_basis_form': False}
hist_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 0.25],
'alpha': [100, 100000],
'gamma': [100, 100000],
'sigma': [0.1, 15],
'kernel_epsilon_noise_std': [1e-4, 5],
'gain': [-15, -3],
'tau': [1e-4, 3e-3]
}
hist_time_params = {'filter_offset': 1,
'filter_duration': 110,
'time_plot_min': 1,
'time_plot_max': 115,
'inducing_pt_spacing_init': 2,
'is_hist': True}
hist_gp_params = {'alpha': [750.0, True],
'gamma': [1000.0, True],
'sigma': [np.sqrt(4), True],
'gain': [-5, False],
'tau': [1e-3, False],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
hist = GP_Covariate.GP_Covariate(self.params, hist_etc_params, self.hist_data,
name='History',
use_bases=False)
hist.add_bounds_params(hist_bounds)
hist.add_gp_params(hist_gp_params)
hist.add_time_init(hist_time_params)
glm_gp.add_covariate(hist)
##########################################
# Stim1
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 2.0],
'alpha': [50, 5000],
'gamma': [10, 3000],
'sigma': [0.1, 15],
'b': [300e-3, 800e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 0,
'filter_duration': 1000,
'time_plot_min': 0,
'time_plot_max': 1100,
'inducing_pt_spacing_init': 15}
cue_gp_params = {'alpha': [100.0, True], 'gamma': [600.0, True], 'sigma': [np.sqrt(4), True],
'b': [500e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.stim1_data, name='Stim1')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Stim2
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [-300e-3, 300e-3],
'alpha': [50, 5000],
'gamma': [10, 3000],
'sigma': [0.1, 15],
'b': [0e-3, 200e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': -250,
'filter_duration': 500,
'time_plot_min': -300,
'time_plot_max': 300,
'inducing_pt_spacing_init': 15}
cue_gp_params = {'alpha': [100.0, True], 'gamma': [600.0, True], 'sigma': [np.sqrt(4), True],
'b': [100e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.stim2_data, name='Stim2')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Stim1
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 500e-3],
'alpha': [50, 5000],
'gamma': [10, 3000],
'sigma': [0.1, 15],
'b': [100e-3, 500e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 0,
'filter_duration': 400,
'time_plot_min': 0,
'time_plot_max': 500,
'inducing_pt_spacing_init': 15}
cue_gp_params = {'alpha': [100.0, True], 'gamma': [600.0, True], 'sigma': [np.sqrt(4), True],
'b': [250e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.stim3_data, name='Stim3')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Coupling a
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 200],
'alpha': [50, 10000],
'gamma': [10, 5000],
'sigma': [0.1, 15],
'b': [5e-3, 100e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 1,
'filter_duration': 100,
'time_plot_min': 0,
'time_plot_max': 150,
'inducing_pt_spacing_init': 2}
cue_gp_params = {'alpha': [5000.0, True], 'gamma': [1000.0, True], 'sigma': [np.sqrt(9), True],
'b': [15e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.hist_data_b, name='Coupling_b')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Coupling b
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 200],
'alpha': [50, 10000],
'gamma': [10, 5000],
'sigma': [0.1, 15],
'b': [5e-3, 100e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 1,
'filter_duration': 100,
'time_plot_min': 0,
'time_plot_max': 150,
'inducing_pt_spacing_init': 2}
cue_gp_params = {'alpha': [5000.0, True], 'gamma': [1000.0, True], 'sigma': [np.sqrt(9), True],
'b': [30e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.hist_data_c, name='Coupling_c',)
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
self.variational_model = PoissonVariational(self.params, self.data_df, glm_gp, kernel_prep_dict)
self.variational_model.initialize_variational_model()
def create_map_covariates(self):
glm_map = GLM_Model_MAP.GLM_Model_MAP(self.params)
glm_map.add_spike_history(self.spike_data)
###################################
# History
###################################
hist_bounds = {'m': [-np.inf, np.inf],
'r': [0, np.inf]}
hist_bases_params = {'bases_fn': utils.create_nonlinear_raised_cos,
'duration': 150,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': self.params.duration_hist,
'filter_offset': 1,
'time_plot_min': 1,
'time_plot_max': 100}
# hist = MAP_Covariate.MAP_Covariate(self.params, y, name='History', is_cov=False, is_hist=True)
hist = MAP_Covariate.MAP_Covariate(self.params, self.hist_data, name='History', is_cov=False, is_hist=True)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
# stimulus 1 parameters
cue_bounds = {'m': [-np.inf, np.inf],
'r': [0, 5]}
cue_bases_params = {'bases_fn': utils.create_nonlinear_raised_cos,
'num_bases': 15,
'duration': 1000, # self.params.duration_cov,
'bin_size': self.params.delta,
'end_point': 600e-3,
'start_point': 0,
'nl_offset': 1.3e-2,
'offset': 0,
'filter_duration': 1500,
'filter_offset': 0,
'time_plot_min': 0,
'time_plot_max': 1500}
cue = MAP_Covariate.MAP_Covariate(self.params, self.stim1_data, name='Stim1', is_cov=True, is_hist=False)
cue.add_bounds_params(cue_bounds)
cue.add_bases_params(cue_bases_params)
glm_map.add_covariate(cue)
######################
# Lick Init
######################
lick_init_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
lick_init_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 500,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 1,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': -250,
'filter_duration': 500,
'filter_offset': -250,
'time_plot_min': -250,
'time_plot_max': 250}
lick_init = MAP_Covariate.MAP_Covariate(self.params, self.stim2_data, name='Stim2', is_cov=True, is_hist=False)
lick_init.add_bounds_params(lick_init_bounds)
lick_init.add_bases_params(lick_init_bases_params)
glm_map.add_covariate(lick_init)
###################
# Lick Train
###################
lick_train_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
lick_train_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 500,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 1,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 0,
'filter_duration': 500,
'filter_offset': 0,
'time_plot_min': 0,
'time_plot_max': 500}
lick_train = MAP_Covariate.MAP_Covariate(self.params, self.stim3_data, name='Stim3', is_cov=True, is_hist=False)
lick_train.add_bounds_params(lick_train_bounds)
lick_train.add_bases_params(lick_train_bases_params)
glm_map.add_covariate(lick_train)
###################################
# Coupling a
###################################
hist_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
hist_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 125,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': 125,
'filter_offset': 1,
'time_plot_min': 1,
'time_plot_max': 125}
# hist = MAP_Covariate.MAP_Covariate(self.params, y, name='History', is_cov=False, is_hist=True)
hist = MAP_Covariate.MAP_Covariate(self.params, self.hist_data_b, name='Coupling_b', is_cov=False, is_hist=False)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
###################################
# Coupling b
###################################
hist_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
hist_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 125,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': 125,
'filter_offset': 1,
'time_plot_min': 1,
'time_plot_max': 125}
# hist = MAP_Covariate.MAP_Covariate(self.params, y, name='History', is_cov=False, is_hist=True)
hist = MAP_Covariate.MAP_Covariate(self.params, self.hist_data_c, name='Coupling_c', is_cov=False, is_hist=False)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
self.map_model = PoissonMAP(self.params, self.data_df, glm_map)
self.map_model.initialize_model()
def create_ml_covariates(self):
glm_map = GLM_Model_MAP.GLM_Model_MAP(self.params)
# stimulus 1 parameters
stim1_bounds = {'m': [-np.inf, np.inf]}
stim1_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'num_bases': 10,
'duration': self.params.duration_cov,
'bin_size': self.params.delta,
'end_point': 125e-3,
'start_point': 0,
'nl_offset': 2e-3,
'offset': self.params.offset_cov,
'filter_duration': self.params.duration_cov,
'filter_offset': self.params.offset_cov}
stim1 = ML_Covariate.ML_Covariate(self.params, self.stim_data, name='Stimuli_1', is_cov=True, is_hist=False)
stim1.add_bounds_params(stim1_bounds)
stim1.add_bases_params(stim1_bases_params)
glm_map.add_covariate(stim1)
# history filter parameters
hist_bounds = {'m': [-np.inf, np.inf]}
hist_bases_params = {'bases_fn': utils.create_nonlinear_raised_cos,
'duration': 80,
'num_bases': 15,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 35e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': self.params.duration_hist,
'filter_offset': self.params.offset_hist}
hist = ML_Covariate.ML_Covariate(self.params, self.hist_data, name='History', is_cov=False, is_hist=True)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
self.ml_model = PoissonMAP(self.params, self.data_df, glm_map, self.params.run_toy_problem)
self.ml_model.initialize_model()
def train_variational(self):
self.variational_model.train_variational_parameters()
def train_map(self):
self.map_model.train_map_parameters()
def train_ml(self):
self.ml_model.train_ml_parameters()
def _add_training_params(self):
pass
def train_model(self, model='variational'):
trained_params = self.poisson_model.train_variational()
def _get_ml_h_k_mu(self):
optimizer = Optimizer.Optimizer(self.params.gp_ml_opt, self.h.shape[0], b1=self.params.gp_ml_b1,
b2=self.params.gp_ml_b2, step_size=self.params.gp_ml_step_size)
for i in range(self.params.gp_ml_iter):
grad = self.X.T @ self.y - self.params.delta * self.X.T @ np.exp(self.X @ self.h + self.Y @ self.k)
update = optimizer.get_update(grad)
self.h = self.h + update # maximizing maximum likelihood
plt.plot(self.h, label='ml')
plt.plot(self.h_true, label='ground truth')
plt.title('ml estimate')
plt.show()
def plot_updates(self):
fig, axs = plt.subplots(self.h_evolution.shape[0] - 1, figsize=(10,60))
fig.suptitle('GP Filter Evolution', y=0.92)
for dx, (row, series) in enumerate(self.h_evolution.iloc[1:,:].iterrows()):
axs[dx].plot(series['filter'], label='gp', color='k')
axs[dx].plot(self.h_true, label='true', color='r')
axs[dx].fill_between(np.arange(series['filter'].shape[0]), series['filter'] - series['cov'],
series['filter'] + series['cov'], alpha=0.3, color='k')
axs[dx].plot(series['inducing'], np.zeros(series['inducing'].shape[0]), 'o', color='orange', label='inducing points')
axs[dx].legend()
self._set_filter_axs(axs[dx])
axs[dx].set_title(row)
plt.subplots_adjust(hspace=0.3)
fig.savefig('glm_data/gp_filter_evolution.pdf', dpi=300)
plt.show()
def _set_filter_axs(self, axs):
len = self.h_time.shape[0]
axs.set_xticks([i for i in np.arange(len + 1) if i % 50 == 0])
labels = [int(i * self.params.time_div) for i in self.h_time if (i * self.params.time_div) % 50 == 0]
labels.append(int(len / 2))
axs.set_xticklabels(labels)
# def fn_min(half_coeff, x, y, Kinv):
# temp = np.zeros(Kinv.shape[0])
# return -1 * (h.T @ (x.T @ y) - np.sum(np.exp(x @ h)) - 0.5 * h.T @ (Kinv @ h))
#
# def jac(half_coeff, X, y, Kinv):
# return X.T @ np.exp(X@h) - X.T @ y + Kinv @ h
#
# def hess(half_coeff, x, y, Kinv):
# return x.T @ np.diag(np.exp(x@h)) @ x
def callback(h_updated):
print('entered callback')
mult_exp_mat = np.load('mult_exp_mat.npy')
unused_exp = mult_exp_mat @ h_updated
cov = np.load('callback_var.npy')
# h_unused = np.random.multivariate_normal(unused_exp, cov)
h_unused = unused_exp
np.save('h_unused.npy', h_unused)
def fn_min(half_coeff, time, use_dx, unuse_dx, X, x_use, y, Kinv):
delta = 0.001
h = np.zeros(time.shape[0])
h[use_dx] = half_coeff
h[unuse_dx] = np.load('h_unused.npy')
obj = -1 * (h.T @ (X.T @ y) - delta*np.sum(np.exp(X @ h)) - 0.5 * half_coeff @ (Kinv @ half_coeff)) + time.shape[0] * np.log(delta)
print(obj)
return obj
def jac(half_coeff, time, use_dx, unuse_dx, X, x_use, y, Kinv):
delta = 0.001
h = np.zeros(time.shape[0])
h[use_dx] = half_coeff
h[unuse_dx] = np.load('h_unused.npy')
return delta*x_use.T @ np.exp(X@h) - x_use.T @ y + Kinv @ half_coeff
def hess(half_coeff, time, use_dx, unuse_dx, X, x_use, y, Kinv):
delta = 0.001
h = np.zeros(time.shape[0])
h[use_dx] = half_coeff
h[unuse_dx] = np.load('h_unused.npy')
hess = delta*x_use.T @ np.diag(np.exp(X@h)) @ x_use
# if not utils.isPD(hess):
# return utils.nearestPD(hess)
return hess
#
# u = 4
# unused_dx = [i for i in range(self.h_time.shape[0]) if i % u == 0]
# used_dx = [i for i in range(self.h_time.shape[0]) if i % u != 0]
# unused_time = self.h_time[unused_dx]
# hh_time = self.h_time[used_dx]
#
# h = self.h_true + 0.01*np.random.randn(self.h.shape[0])
# hh = h[used_dx]
# xx = self.X[:, used_dx]
# # kk = utils.decay_kernel(self.h_time.reshape(-1, 1),self.h_time.reshape(-1, 1), sigma_h=self.sigma_true,
# # alpha=self.alpha_true, gamma=self.gamma_true)[:,used_dx][used_dx,:]
# kk = ka.RBF(1).__call__(1000*hh_time.reshape(-1,1))
# kk_inv = np.linalg.inv(kk)
#
#
#
#
# # k_used_used = utils.decay_kernel(hh_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# # alpha=self.alpha_true, gamma=self.gamma_true)
# k_used_used = ka.RBF(1).__call__(hh_time.reshape(-1,1)*1000, 1000*hh_time.reshape(-1,1))
# k_unused_used = ka.RBF(1).__call__(1000*unused_time.reshape(-1, 1),1000*hh_time.reshape(-1, 1))
# k_used_unused = ka.RBF(1).__call__(1000*hh_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
# k_unused_unused = ka.RBF(1).__call__(1000*unused_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
# k_unused_used = utils.decay_kernel(unused_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_used_unused = utils.decay_kernel(hh_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_unused_unused = utils.decay_kernel(unused_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# time, use_dx, unuse_dx, X, x_use, y, Kinv
#
#
# u = 3
# r = 10
# h = np.copy(self.h_true) + 0.5*np.random.randn(self.h_true.shape[0])
# unuse_dx = [i for i in range(self.h_time.shape[0]) if i % u == 0]
# use_dx = [i for i in range(self.h_time.shape[0]) if i % u != 0]
# time = self.h_time
# hh_time = time[use_dx]
# unused_time = time[unuse_dx]
#
# K = utils.decay_kernel(hh_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true, noise_std=self.params.gp_noise_std)
# k_unused_used = utils.decay_kernel(unused_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_used_unused = utils.decay_kernel(hh_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_unused_unused = utils.decay_kernel(unused_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true, noise_std=self.params.gp_noise_std)
#
#
# X = np.copy(self.X)
# x_use = X[:,use_dx]
# y = np.copy(self.y)
# # y[y>0] = 1
#
# Kinv = scipy.linalg.inv(K)
# h_use = h[use_dx]
#
# mult_exp_mat = k_unused_used @ Kinv
# h_unuse_est = mult_exp_mat @ h_use
# np.save('mult_exp_mat.npy', mult_exp_mat)
#
# cov = k_unused_unused - k_unused_used @ Kinv @ k_unused_used.T
# np.save('callback_var.npy', cov)
# h_unused = np.random.multivariate_normal(mult_exp_mat @ h_use, cov)
# np.save('h_unused.npy', h_unused)
#
# a = scipy.optimize.minimize(fn_min, h_use, args=(time, use_dx, unuse_dx, X, x_use, y, Kinv),
# method='Newton-CG', jac=jac, hess=hess, options={'xtol':1e-4, 'disp':True, 'maxiter':100000},
# callback=callback)
# min_h_use = a.x
# mult_exp_mat = k_unused_used @ Kinv
# h_unuse_est = mult_exp_mat @ min_h_use
# h_use_est = k_used_unused @ (np.linalg.inv(k_unused_unused) @ h_unuse_est)
#
# estimated_h_all = np.zeros(self.h_true.shape[0])
# estimated_h_all[use_dx] = h_use_est
# estimated_h_all[unuse_dx] = h_unuse_est
# plt.plot(estimated_h_all)
# plt.plot(self.h_true)
# plt.show()
# k_used_used = ka.RBF(r).__call__(hh_time.reshape(-1,1)*1000, 1000*hh_time.reshape(-1,1))
# k_unused_used = ka.RBF(r).__call__(1000*unused_time.reshape(-1, 1),1000*hh_time.reshape(-1, 1))
# k_used_unused = ka.RBF(r).__call__(1000*hh_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
# k_unused_unused = ka.RBF(r).__call__(1000*unused_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
#
# a = scipy.optimize.minimize(fn_min, hh, args=(xx, self.y, kk_inv),
# method='Newton-CG', jac=jac, hess=hess, options={'xtol':1e-5, 'disp':True, 'maxiter':100000},
# callback=callback)
# h_used = a.x
# # h_unused = -1*(k_unused_used) @ (utils.nearestPD(np.linalg.inv(k_used_used)) @ h_used)
# h_unused = self.h_true[unused_dx]
# h_all = np.zeros(self.h_time.shape[0])
# h_all[used_dx] = h_used
# h_all[unused_dx] = h_unused
# plt.plot(h_all)
# plt.plot(self.h_true)
# plt.show()
#
#
# plt.plot(h_used)
# plt.plot(self.h_true[used_dx])
# plt.show()
#
# k_unused_unused = utils.decay_kernel(10*unused_time.reshape(-1, 1),10*unused_time.reshape(-1, 1), sigma_h=1000,
# alpha=0.1, gamma=0.45)
#
# sample = np.random.multivariate_normal(np.zeros(k_unused_unused.shape[0]), k_unused_unused)
# plt.plot(sample)
# plt.show()
#
# def obj_fn(alpha, X, y, h, h_time):
# K = utils.decay_kernel(h_time.reshape(-1, 1), h_time.reshape(-1, 1), sigma_h=2, alpha=alpha, gamma=600, noise_std=1)
# Kinv = np.linalg.inv(K)
# W = X.T @ np.diag(np.exp(X @ h)) @ X
# Wsqrt = np.sqrt(1e-3) * sqrtm(X.T @ (np.diag(np.exp(X @ h)) @ X))
# I = np.identity(K.shape[0])
# obj1 = y.T @ X @ h
# obj2 = -1 * np.sum(np.exp(X@h))
# obj3 = -0.5 * h.T @ Kinv @ h
# obj4 = 0 #-0.5 * np.linalg.slogdet(I + Wsqrt @ (K @ Wsqrt))
#
# return -1*(obj1 + obj2 + obj3 + obj4)
#
# def obj_grad(alpha, X, y, h, h_time):
# K = utils.decay_kernel(h_time.reshape(-1, 1), h_time.reshape(-1, 1), sigma_h=2,
# alpha=alpha, gamma=600, noise_std=0.001)
# Kinv = np.linalg.inv(K)
# Wsqrt = np.sqrt(1e-3) * sqrtm(X.T @ (np.diag(np.exp(X @ h)) @ X))
# I = np.identity(K.shape[0])
#
# K_prime = K * -1 * np.log(np.outer(np.exp(h_time ** 2), np.exp(h_time ** 2)))
# term1 = 0.5 * h.T @ (Kinv @ (K_prime @ Kinv)) @ h
# term2 = -0.5 * np.trace(np.linalg.inv(I + Wsqrt @ K @ Wsqrt) @ (Wsqrt @ K_prime @ Wsqrt))
#
# return -1*(term1 + term2)
#
# def callback(alpha):
# print(f'alpha: {alpha}')
#
# a = optimize.minimize(obj_fn, 500, args=(x_design, y_true, h_true, h_time),
# method='BFGS', jac=obj_grad, options={'xtol': 1e-4, 'disp': True, 'maxiter': 500},
# callback=callback)
#
# def test_deriv():
# a = 1
# b = 1
# c = 3.1
# d = 1.3
# e = 2.4
# time = np.array([1,2]).reshape(-1,1)
# time_add_sq = -1 * np.log(np.outer(np.exp(time ** 2), np.exp(time ** 2)))
# A = np.array([[a, 0], [0, b]])
# B = np.array([[c, d], [0, e]])
#
# K = utils.decay_kernel(time, time, sigma_h=2,
# alpha=2, gamma=1)
#
# inside_log = a*e*K[1,1] + a*b + c*e*K[0,0]*K[1,1] + c*b*K[0,0] + d*e*K[1,0]*K[1,1] + b*d*K[1,0]
# deriv = (-a*e*(2**2 + 2**2)*K[1,1] - c*e*K[0,0]*(2**2 + 2**2)*K[1,1] - c*e*K[1,1]*(1**2 + 1**2)*K[0,0] -
# b*c*(1**2 + 1**2)*K[0,0] - d*e*K[1,0]*(2**2 + 2**2)*K[1,1] - d*e*K[1,1]*(1**2 + 2**2)*K[1,0] - d*b*(1**2 + 2**2)*K[1,0])
#
# K_prime = K * time_add_sq
# grad1 = deriv/inside_log
# grad2 = np.trace(np.linalg.inv(A + B@K) @ B@K_prime)
def _log_likelihood_brute(params, *args):
alpha = params[0]
gamma = params[1]
sigma = params[2]
print(alpha)
h = args[0]
time = args[1]
X = args[2]
y = args[3]
delta = self.params.delta
Kinv = self.GP.K_all_inv
obj = -1 * (h.T @ (X.T @ y) - delta * np.sum(np.exp(X @ h)) - 0.5 * h @ (Kinv @ h)) + \
time.shape[0] * np.log(delta)
return obj
# y = np.zeros(200)
# for alpha in range(y.shape[0]):
# y[alpha] = _log_marginal_deriv_wrt_alpha_test(np.array([5*alpha]), self.h_true)
def _log_marginal_deriv_wrt_alpha_test(alpha_in):
h = self.h
alpha = alpha_in[0]
sigma = self.sigma
gamma = self.gamma
K = utils.decay_kernel(self.h_time.reshape(-1, 1), self.h_time.reshape(-1, 1), sigma_h=sigma,
alpha=alpha, gamma=gamma, noise_std=0.001)
Kinv = np.linalg.inv(K)
W = self.params.delta * self.X.T @ (np.diag(np.exp(self.X @ self.h_true)) @ self.X)
Winv = np.linalg.inv(W)
Wsqrt = np.sqrt(self.params.delta) * sqrtm(self.X.T @ (np.diag(np.exp(self.X @ self.h_true)) @ self.X))
I = np.identity(K.shape[0])
K_prime = K * -1 * np.log(np.outer(np.exp(self.h_time ** 2), np.exp(self.h_time ** 2)))
# term1 = 0.5 * self.h_true.T @ (Kinv @ (K_prime @ Kinv)) @ self.h_true
# term2 = -0.5 * np.trace(np.linalg.inv(I + Wsqrt @ K @ Wsqrt) @ (Wsqrt @ K_prime @ Wsqrt))
term1 = 0.5*h.T @ (Kinv @ K_prime @ Kinv) @ h - 0.5*np.trace(np.linalg.inv(Winv + K)@K_prime)
term2 = np.zeros(self.X.shape[1])
inv_K_inv_W = np.linalg.inv(Kinv + W)
for i in range(self.X.shape[1]):
term2[i] = -0.5*np.trace(inv_K_inv_W @ (self.X.T @ np.diag(self.X[:,i]*np.exp(self.X@h)) @ self.X))
# X_reshape = self.X.reshape(self.X.shape[0], 1, -1)
# inv_Kinv_pl_W = np.linalg.inv(Kinv + W)
#
# diag_mat = diag_3d(self.X*np.exp(self.X@h).reshape(-1,1))
# part1 = np.einsum('ij,jkl->ikl', self.X.T, diag_mat)
# part2 = np.einsum('ijl,jk->ikl', part1, self.X)
# part3 = np.einsum('ij,jkl->ikl', inv_Kinv_pl_W, part2)
# term2 = np.trace(part3)
#
# diag_mat = diag_3d(self.X[:,self.X.shape[1]//2:] * np.exp(self.X @ h).reshape(-1, 1))
# part1 = np.einsum('ij,jkl->ikl', self.X.T, diag_mat)
# part2 = np.einsum('ijl,jk->ikl', part1, self.X)
# part3 = np.einsum('ij,jkl->ikl', inv_Kinv_pl_W, part2)
# term2 = np.concatenate([term2, np.trace(part3)])
term3 = np.linalg.inv(I + K@W) @ K_prime @ (self.X.T @ self.y - self.params.delta*self.X.T @ np.exp(self.X@h))
    grad = term1 + np.sum(term2*term3)
    return grad
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 13:28:03 2021
@author: pmazumdar
"""
##############################################
## Smooth 13CO map to CHIMPS Resolution ##
##############################################
import radio_beam
from spectral_cube import SpectralCube
from astropy import units as u
from astropy.table import Table
from astrodendro import Dendrogram, ppv_catalog, structure
from astropy import wcs
from astropy.table import Table
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from astrodendro.pruning import all_true, min_vchan, min_delta, min_area
from astropy import constants as const
import aplpy
import seaborn as sns
import scipy.stats as sst
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
cube = SpectralCube.read('/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/G305_13CO_resample.fits')
cube_cd = SpectralCube.read('/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/ntotal.fits')
beam = radio_beam.Beam(major=27.4*u.arcsec, minor=27.4*u.arcsec, pa=0*u.deg)
new_cube = cube.convolve_to(beam)
new_cube_cd = cube_cd.convolve_to(beam)
hdu_13CO = fits.open('/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/G305_13CO-moment0.fits')[0]
mask_nan = ~np.isnan(hdu_13CO.data) # only include non nan pixels
masked_cube = new_cube.with_mask(mask_nan) # apply mask to spectral cube
masked_cube_cd = new_cube_cd.with_mask(mask_nan) # apply mask to spectral cube
data = masked_cube.hdu.data
hd = masked_cube.hdu.header
wc = wcs.WCS(hd)
data_cd = masked_cube_cd.hdu.data
hd_cd = masked_cube_cd.hdu.header
wc_cd = wcs.WCS(hd_cd)
## Custom Definitions for the Dendrogram ##
rms = 0.15 # rms noise
rms_cd = 1.6e7
cd_min = 3.37e11
bmaj = hd['bmaj'] # beam_major
bmin = hd['bmin'] # beam_minor
cdelt1 = hd['cdelt1'] # delta_x
cdelt2 = hd['cdelt2'] # delta_y
deltav_kms = abs(hd['CDELT3']/1000.) # vel res in kmps
ppb = abs((bmaj*bmin)/(cdelt1*cdelt2)*2*np.pi/(8*np.log(2))) # pixel_per_beam
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Creating the Dendrogram
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Prune leaves below a given height:
#def custom_independent(structure, index=None, value=None):
# peak_index, peak_value = structure.get_peak()
# return peak_value > 5
is_independent = all_true((min_delta(5*rms), min_area(1*ppb), min_vchan(6)))
#is_independent_cd = all_true((min_delta(3*cd_min), min_area(1*ppb), min_vchan(2)))
d = Dendrogram.compute(data, min_value=5*rms, wcs=wc, is_independent = is_independent, verbose=1)
d.save_to('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_13CO_smoothed_dendro.fits')
#d_cd = Dendrogram.compute(data_cd, min_value=5*cd_min, wcs=wc_cd, is_independent = is_independent_cd, verbose=1)
#d_cd.save_to('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_column_densty_smoothed_dendro.fits')
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% START HERE IF DENDROGRAM ALREADY RUN
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#d = Dendrogram.load_from('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_13CO_dendro.fits')
#d_cd = Dendrogram.load_from('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_column_densty_dendro.fits')
leaf_id=np.array([])
for s in d.all_structures:
if s.is_leaf:
leaf_id=np.append(leaf_id,s.idx)
print(leaf_id)
leaf_id.sort()
leaf_cd_id=np.array([])
#for s in d_cd.all_structures:
# if s.is_leaf:
# leaf_cd_id=np.append(leaf_cd_id,s.idx)
#print(leaf_cd_id)
#leaf_cd_id.sort()
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Viewing and Plotting the Dendrogram
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
p = d.plotter()
#p_cd = d_cd.plotter()
#fig,ax = plt.subplots(nrows = 2, ncols=1)
#plt.rcParams.update({"font.size":6})
#ax[0] = fig.add_subplot(1, 1, 1)
#ax[0].set_ylabel('$^{13}$CO Peak Intensity')
#p.plot_tree(ax[0],color='seagreen',lw=0.5)
#p_cd.plot_tree(ax[1],color='orange',lw=0.5)
#ax[1].set_yscale('log')
#ax[1].set_xlabel('Index of Structure')
#ax[1].set_ylabel('$^{13}$CO Column Density')
#plt.savefig("/home/pmazumdar/Documents/LASMA/Reduction/class_maps/temp/plots/Dendrogram_G305.eps",dpi=300)
#plt.close()
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Creating the ppv_catalog
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
metadata = {}
metadata['data_unit'] = u.Jy
metadata['beam_major'] = (bmaj * u.deg).to(u.arcsecond) # FWHM
metadata['beam_minor'] = (bmin * u.deg).to(u.arcsecond) # FWHM
metadata['velocity_scale'] = 0.5 * u.km/u.s # v_res
cat = ppv_catalog(d,metadata)
#cat_cd = ppv_catalog(d_cd,metadata)
# Note : Catalog of Column Density : Flux per pixel represents Col. Density per delX.delY.delV ##
#
dists = 3800*u.parsec # Distance of the source.
dist_gc = 6598.5452942296305*u.parsec
x2 = 18 # x2 = X_CO / 2E20 [cm^-2 / K km s^-1] , where X_CO is a CO-H2 conversion factor.
#x2 = 1 # for 12CO
sigma_v = np.array(cat['v_rms'])
sigma_x = np.array(cat['radius'])*(((1*u.arcsecond).to(u.rad)).value)*dists.value
#sigma_v_cd = np.array(cat_cd['v_rms'])
#sigma_x_cd = np.array(cat_cd['radius'])*(((1*u.arcsecond).to(u.rad)).value)*dists.value
eta = 1.91 # conversion factor. R = eta * sigma_r
G = 4.302e-3 # units of pc (M_sun)^-1 (km/s)^2
deltax_pc = abs(np.pi/180.*hd['CDELT1']*dists.value) # delta x in pc
deltay_pc = abs(np.pi/180.*hd['CDELT2']*dists.value) # delta y in pc
sigma_majs = cat['major_sigma']
sigma_mins = cat['minor_sigma']
#sigma_majs_cd = cat_cd['major_sigma']
#sigma_mins_cd = cat_cd['minor_sigma']
R12_13 = (6.21*(dist_gc.value/1000.0))+18.71 # C12/C13 abundance ratio (Milam et al. 2005)
R12 = 8.5e-5 # C12/H2 abundance (Frerking et al.)
R13_inv = R12_13/R12
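# Hedged numeric check: at dist_gc ~ 6.60 kpc, R12_13 ~ 6.21*6.60 + 18.71 ~ 59.7,
# so R13_inv = R12_13/R12 ~ 59.7/8.5e-5 ~ 7.0e5, i.e. N(H2) ~ 7e5 * N(13CO).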
mu = 2.72 # Average H2 mass including He fraction
mp = 8.4089382e-58*u.solMass # Proton Mass
nu_12CO = 345.79598990*u.GHz
nu_13CO = 330.58796530*u.GHz
delta_nu_12 = 0.0011534512649414282*u.GHz
delta_nu_13 = 0.0011027227552869259*u.GHz
##
## Additions to Integrated Intensity Catalog
##
## adding a radius column to the catalog
cat['radius_pc'] = np.zeros(len(cat),dtype=float)
cat['radius_pc'] = eta*np.sqrt((sigma_majs*deltax_pc)*(sigma_mins*deltay_pc))
cat['radius_pc'].unit = u.parsec
## adding a luminosity column to the catalog
cat['luminosity']=np.zeros(len(cat),dtype=float)
cat['luminosity'] = cat['flux']*deltav_kms*deltax_pc*deltay_pc
cat['luminosity'].unit = u.K * u.km / u.s * u.pc * u.pc
## adding a mass column to the catalog
#cat['M_lum']=np.zeros(len(cat),dtype=float)
#cat['M_lum'] = cat['luminosity']*4.4*x2
#cat['M_lum'].unit = u.solMass
cat['Mass']=np.zeros(len(cat),dtype=float)
cat['Mass'].unit = u.solMass
for s in d.all_structures:
str_mask = s.get_mask()
str_cube = data_cd[str_mask]
total_cd = np.nansum(str_cube)
mass = mu*mp.value*R13_inv*total_cd*deltax_pc*deltay_pc*(u.parsec.to(u.centimeter))**2
cat[s.idx]['Mass'] = mass
## adding a surface density column to the catalog
cat['Sigma_exact']=np.zeros(len(cat),dtype=float)
cat['Sigma_exact'] = cat['Mass']/(cat['area_exact']*deltax_pc*deltay_pc)
cat['Sigma_exact'].unit = u.solMass/(u.pc*u.pc)
cat['Sigma_ellipse'] = np.zeros(len(cat),dtype=float)
cat['Sigma_ellipse'] = cat['Mass']/(np.pi*cat['radius_pc']**2)
cat['Sigma_ellipse'].unit = u.solMass/(u.pc*u.pc)
## calculating virial parameter alpha
cat['virial_parameter'] = np.zeros(len(cat),dtype=float)
cat['virial_parameter'] = (5*((sigma_v)**2)*cat['radius_pc'])/(4.4*x2*cat['luminosity']*G)
cat['virial_parameter'].unit = ''
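## Hedged note on the expression above: it is the virial parameter
## alpha = 5*sigma_v**2*R/(G*M) with the mass taken as the CO-luminosity mass
## M = 4.4*x2*luminosity (Msun), rather than the column-density 'Mass' column.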
##
## Additions to Integrated Col. Den. Catalog
##
## adding a radius column to the catalog
#cat_cd['radius_pc'] = np.zeros(len(cat_cd),dtype=float)
#cat_cd['radius_pc'] = eta*np.sqrt((sigma_majs_cd*deltax_pc)*(sigma_mins_cd*deltay_pc))
#cat_cd['radius_pc'].unit = u.parsec
## adding a mass column to the catalog
#cat_cd['Mass'] = np.zeros(len(cat_cd),dtype=float)
#cat_cd['Mass'] = mu*mp*R13_inv*cat_cd['flux']*deltav_kms*deltax_pc*deltay_pc*(u.parsec.to(u.centimeter))**2
#cat_cd['Mass'].unit = u.solMass
## adding a surface density column to the catalog
#cat_cd['Sigma_exact']=np.zeros(len(cat_cd),dtype=float)
#cat_cd['Sigma_exact'] = cat_cd['Mass']/(cat_cd['area_exact']*deltax_pc*deltay_pc)
#cat_cd['Sigma_exact'].unit = u.solMass/(u.pc*u.pc)
#cat_cd['Sigma_ellipse'] = np.zeros(len(cat_cd),dtype=float)
#cat_cd['Sigma_ellipse'] = cat_cd['Mass']/(np.pi*cat_cd['radius_pc']**2)
#cat_cd['Sigma_ellipse'].unit = u.solMass/(u.pc*u.pc)
## calculating virial parameter alpha
#cat_cd['virial_parameter'] = np.zeros(len(cat_cd),dtype=float)
#cat_cd['virial_parameter'] = (3*((sigma_v_cd)**2)*cat_cd['radius_pc'])/(cat_cd['Mass']*G)
#cat_cd['virial_parameter'].unit = ''
## Save Catalogs ##
cat.write('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_13CO_smoothed_cat.fits',overwrite=True)
#cat_cd.write('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_column_density_smoothed_cat.fits',overwrite=True)
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% Loading the ppv_catalog
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#cat = Table.read('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_13CO_cat.fits')
#cat_cd = Table.read('/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/G305_column_density_cat.fits')
#
# Variables for plotting leaves
#
L_CO = np.array([])
radius_pc = np.array([])
vrms = np.array([])
mass = np.array([])
Sigma_exact = np.array([])
Sigma_ellipse = np.array([])
alpha = np.array([])
#radius_pc_cd = np.array([])
#vrms_cd = np.array([])
#mass_cd = np.array([])
#Sigma_exact_cd = np.array([])
#Sigma_ellipse_cd = np.array([])
#alpha_cd = np.array([])
for i in leaf_id:
L_CO = np.append(L_CO, cat[int(i)]['luminosity'])
radius_pc = np.append(radius_pc,cat[int(i)]['radius_pc'])
vrms = np.append(vrms,sigma_v[int(i)])
    mass = np.append(mass, cat[int(i)]['Mass'])  # the 'M_lum' column is commented out above, so the column-density 'Mass' is used here
Sigma_exact = np.append(Sigma_exact, cat[int(i)]['Sigma_exact'])
Sigma_ellipse = np.append(Sigma_ellipse,cat[int(i)]['Sigma_ellipse'])
alpha = np.append(alpha,cat[int(i)]['virial_parameter'])
#for i in leaf_cd_id:
# radius_pc_cd = np.append(radius_pc_cd,cat_cd[int(i)]['radius_pc'])
# vrms_cd = np.append(vrms_cd,sigma_v_cd[int(i)])
# mass_cd = np.append(mass_cd, cat_cd[int(i)]['Mass'])
# Sigma_exact_cd = np.append(Sigma_exact_cd, cat_cd[int(i)]['Sigma_exact'])
# Sigma_ellipse_cd = np.append(Sigma_ellipse_cd,cat_cd[int(i)]['Sigma_ellipse'])
# alpha_cd = np.append(alpha_cd,cat_cd[int(i)]['virial_parameter'])
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
#&%
#&% CHIMPS and ATLASGAL Data Load
#&%
#&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
cmp_data = Table.read("/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/table4.fits")
cmp_mass = cmp_data['Mass']
cmp_radius = cmp_data['R_eq']
cmp_alpha = cmp_data['alpha'].astype(np.float64)
cmp_flag = cmp_data['Flag_R']
cmp_vrms = cmp_data['sigma_v']
AGAL_data = Table.read("/home/pmazumdar/Documents/LASMA/Dendrogram/fits_files/asu.fits")
AGAL_mass = AGAL_data['logMclump'].astype(np.float64)
AGAL_alpha = AGAL_data['alpha'].astype(np.float64)
AGAL_radius = AGAL_data['Rad'].astype(np.float64)
AGAL_vrms = AGAL_data['NH3LW'].astype(np.float64)
plt.rcParams.update({'font.size':10})
fig,ax = plt.subplots(ncols=4,nrows=1,figsize=(15,4))
sns.kdeplot(data=np.log10(mass),ax=ax[0],c='goldenrod',fill=1,alpha=0.5,label='G305')
#sns.kdeplot(data=np.log10(mass_cd),ax=ax[0],c='gray',fill=0,alpha=0.5,label='N$_{cd}$')
sns.kdeplot(data=np.log10(cmp_mass[(cmp_flag==3)]),ax=ax[0],c='blue',fill=1,alpha=0.5,label='CHIMPS')
sns.kdeplot(data=AGAL_mass[AGAL_mass>0],ax=ax[0],c='maroon',fill=0,alpha=0.5,linestyle='dashed',label='AGAL')
ax[0].set_xlabel(r'log$_{10}$(M/M$_{\odot}$)')
ax[0].legend(loc='upper left')
sns.kdeplot(data=np.log10(radius_pc),ax=ax[1],c='goldenrod',fill=1,alpha=0.5)
#sns.kdeplot(data=np.log10(radius_pc_cd),ax=ax[1],c='gray',fill=0,alpha=0.5)
sns.kdeplot(data=np.log10(cmp_radius[(cmp_flag==3)]),ax=ax[1],c='blue',fill=1,alpha=0.5)
sns.kdeplot(data=np.log10(AGAL_radius[AGAL_radius>0]),ax=ax[1],c='maroon',fill=0,alpha=0.5,linestyle='dashed')
ax[1].set_xlabel(r'log$_{10}$(R$_{eq}$/pc)')
ax[1].set_ylabel('')
sns.kdeplot(data=np.log10(alpha), ax=ax[2], c='goldenrod', fill=1, alpha=0.5)  # call completed by analogy with the mass/radius panels above (assumption: alpha goes in panel 2)
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description="Convert UVHDF5 files into CASA Measurement Set files.")
parser.add_argument("--HDF5", default="model.hdf5", help="The name of the UVHDF5 file you wish to import.")
parser.add_argument("--MS", help="The original MS data set, so that we can copy it to stuff in new values.")
parser.add_argument("--out", default="model.ms", help="The output MS dataset.")
parser.add_argument("--casac", action="store_true", help="Use the casac distribution instead of casapy")
parser.add_argument("--skip", action="store_true", help="Skip checking that the weights are the same.")
args,extras = parser.parse_known_args()
import numpy as np
import sys
import shutil
import h5py
import os
cc = 2.99792458e10 # [cm s^-1]
# Credit: parts of this file originated from the `vis_sample` repository,
# at https://github.com/AstroChem/vis_sample/blob/master/vis_sample/file_handling.py
# CASA interfacing code comes from <NAME>' casa-python and casa-data package
# commands for retrieving ms data are from <NAME> (@seanandrews)
ms_clone = args.MS
outfile = args.out
if args.casac:
try:
import casac
tb = casac.casac.table()
ms = casac.casac.ms()
except ImportError:
print("casac was not able to be imported, make sure all dependent packages are installed")
print("try: conda install -c pkgw casa-python casa-data")
sys.exit(1)
# Copy the original file so that we can then stuff our own visibilities into it
os.system("rm -rf " + outfile)
shutil.copytree(ms_clone, outfile)
# Use CASA ms tools to get the channel/spw info
ms.open(outfile)
spw_info = ms.getspectralwindowinfo()
nchan = spw_info["0"]["NumChan"]
npol = spw_info["0"]["NumCorr"]
ms.close()
# Use CASA table tools to get frequencies
tb.open(outfile + "/SPECTRAL_WINDOW")
ms_freqs = np.squeeze(tb.getcol("CHAN_FREQ"))
tb.close()
# Ascertain whether the frequencies were stored increasing or decreasing in the original MS
if np.all(np.diff(ms_freqs) > 0.0):
dnu_pos = True
elif np.all(np.diff(ms_freqs) < 0.0):
dnu_pos = False
else:
raise RuntimeError("Measurement Set Frequencies not in strict monotonically increasing or decreasing order.")
# Read the model from the HDF5 file
fid = h5py.File(args.HDF5, "r")
if dnu_pos:
freqs = fid["freqs"][:] # [Hz]
uu = fid["uu"][:,:] # [kilolam]
vv = fid["vv"][:,:] # [kilolam]
real = fid["real"][:,:] # [Jy]
imag = fid["imag"][:,:] # [Jy]
weight = fid["weight"][:,:] #[1/Jy^2]
unflagged = fid["flag"][:,:] # Bool
else:
freqs = fid["freqs"][:][::-1] # [Hz]
uu = fid["uu"][:][::-1,:] # [kilolam]
vv = fid["vv"][:][::-1,:] # [kilolam]
real = fid["real"][:][::-1,:] # [Jy]
imag = fid["imag"][:][::-1,:] # [Jy]
weight = fid["weight"][:][::-1,:] #[1/Jy^2]
unflagged = fid["flag"][:][::-1,:] # Bool
VV = real + 1.0j * imag # [Jy]
fid.close()
# Check to make sure the frequencies of the two datasets match
assert np.allclose(freqs, ms_freqs), "Frequencies of MS and HDF5 do not match."
# Use CASA table tools to fill new DATA and WEIGHT
tb.open(outfile, nomodify=False)
data = tb.getcol("DATA")
uvw = tb.getcol("UVW")
ms_weight = tb.getcol("WEIGHT")
ms_flag = tb.getcol("FLAG")
# flagged = np.any(ms_flag, axis=(0, 1)) # Boolean array of length nvis
# unflagged = ~flagged # Flip so that indexing by this array gives us the good visibilities
flagged = np.any(ms_flag, axis=(0))
# we need to pull the antennas and find where the autocorrelation values are and aren't
ant1 = tb.getcol("ANTENNA1")
ant2 = tb.getcol("ANTENNA2")
xc = ant1 != ant2 # indices of cross-correlations
# Now, combine the flagging indices with the autocorrelation indices
# These flags denote all visibilities that we should not be including in our likelihood calculation.
# ms_flag = ~xc & flagged
ms_unflagged = xc & ~flagged
# Break out the u, v spatial frequencies (in meter units)
ms_uu = uvw[0,:]
ms_vv = uvw[1,:]
# u and v are measured in meters, convert to microns and then convert these to kilo-lambda
# Convert freqs to wavelengths in microns
lams = cc/ms_freqs * 1e4 # [microns]
ms_uu = 1e-3 * (np.tile(ms_uu * 1e6, (nchan, 1)).T / lams).T
ms_vv = 1e-3 * (np.tile(ms_vv * 1e6, (nchan, 1)).T / lams).T
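# Hedged unit check for the two conversions above: uvw comes out of CASA in
# meters, so u[m]*1e6 is microns, dividing by lambda[microns] gives baselines
# in wavelengths, and the 1e-3 factor converts to kilo-lambda, matching the
# [kilolam] units of the uu/vv arrays read from the HDF5 file.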
from parcels import (FieldSet, Field, ScipyParticle, JITParticle,
Variable, StateCode, OperationCode, CurvilinearZGrid)
from parcels import ParticleSetSOA, ParticleFileSOA, KernelSOA # noqa
from parcels import ParticleSetAOS, ParticleFileAOS, KernelAOS # noqa
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
pset_type = {'soa': {'pset': ParticleSetSOA, 'pfile': ParticleFileSOA, 'kernel': KernelSOA},
'aos': {'pset': ParticleSetAOS, 'pfile': ParticleFileAOS, 'kernel': KernelAOS}}
def fieldset(xdim=40, ydim=100):
U = np.zeros((ydim, xdim), dtype=np.float32)
V = np.zeros((ydim, xdim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
lat = np.linspace(-60, 60, ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
dimensions = {'lat': lat, 'lon': lon, 'depth': depth}
return FieldSet.from_data(data, dimensions)
@pytest.fixture(name="fieldset")
def fieldset_fixture(xdim=40, ydim=100):
return fieldset(xdim=xdim, ydim=ydim)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_create_lon_lat(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart, dtype=np.float32)
lat = np.linspace(1, 0, npart, dtype=np.float32)
pset = pset_type[pset_mode]['pset'](fieldset, lon=lon, lat=lat, pclass=ptype[mode])
assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('lonlatdepth_dtype', [np.float64, np.float32])
def test_pset_create_line(fieldset, pset_mode, mode, lonlatdepth_dtype, npart=100):
lon = np.linspace(0, 1, npart, dtype=lonlatdepth_dtype)
lat = np.linspace(1, 0, npart, dtype=lonlatdepth_dtype)
pset = pset_type[pset_mode]['pset'].from_line(fieldset, size=npart, start=(0, 1), finish=(1, 0),
pclass=ptype[mode], lonlatdepth_dtype=lonlatdepth_dtype)
assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
assert isinstance(pset[0].lat, lonlatdepth_dtype)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_create_list_with_customvariable(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart, dtype=np.float32)
lat = np.linspace(1, 0, npart, dtype=np.float32)
class MyParticle(ptype[mode]):
v = Variable('v')
v_vals = np.arange(npart)
pset = pset_type[pset_mode]['pset'].from_list(fieldset, lon=lon, lat=lat, v=v_vals, pclass=MyParticle)
assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
assert np.allclose([p.v for p in pset], v_vals, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('restart', [True, False])
def test_pset_create_fromparticlefile(fieldset, pset_mode, mode, restart, tmpdir):
filename = tmpdir.join("pset_fromparticlefile.nc")
lon = np.linspace(0, 1, 10, dtype=np.float32)
lat = np.linspace(1, 0, 10, dtype=np.float32)
class TestParticle(ptype[mode]):
p = Variable('p', np.float32, initial=0.33)
p2 = Variable('p2', np.float32, initial=1, to_write=False)
p3 = Variable('p3', np.float32, to_write='once')
pset = pset_type[pset_mode]['pset'](fieldset, lon=lon, lat=lat, depth=[4]*len(lon), pclass=TestParticle, p3=np.arange(len(lon)))
pfile = pset.ParticleFile(filename, outputdt=1)
def Kernel(particle, fieldset, time):
particle.p = 2.
if particle.lon == 1.:
particle.delete()
pset.execute(Kernel, runtime=2, dt=1, output_file=pfile)
pfile.close()
pset_new = pset_type[pset_mode]['pset'].from_particlefile(fieldset, pclass=TestParticle, filename=filename,
restart=restart, repeatdt=1)
for var in ['lon', 'lat', 'depth', 'time', 'p', 'p2', 'p3']:
assert np.allclose([getattr(p, var) for p in pset], [getattr(p, var) for p in pset_new])
if restart:
assert np.allclose([p.id for p in pset], [p.id for p in pset_new])
pset_new.execute(Kernel, runtime=2, dt=1)
assert len(pset_new) == 3*len(pset)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy'])
@pytest.mark.parametrize('lonlatdepth_dtype', [np.float64, np.float32])
def test_pset_create_field(fieldset, pset_mode, mode, lonlatdepth_dtype, npart=100):
np.random.seed(123456)
shape = (fieldset.U.lon.size, fieldset.U.lat.size)
K = Field('K', lon=fieldset.U.lon, lat=fieldset.U.lat,
data=np.ones(shape, dtype=np.float32), transpose=True)
pset = pset_type[pset_mode]['pset'].from_field(fieldset, size=npart, pclass=ptype[mode],
start_field=K, lonlatdepth_dtype=lonlatdepth_dtype)
assert (np.array([p.lon for p in pset]) <= K.lon[-1]).all()
assert (np.array([p.lon for p in pset]) >= K.lon[0]).all()
assert (np.array([p.lat for p in pset]) <= K.lat[-1]).all()
assert (np.array([p.lat for p in pset]) >= K.lat[0]).all()
assert isinstance(pset[0].lat, lonlatdepth_dtype)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
def test_pset_create_field_curvi(pset_mode, npart=100):
np.random.seed(123456)
r_v = np.linspace(.25, 2, 20)
theta_v = np.linspace(0, np.pi/2, 200)
dtheta = theta_v[1]-theta_v[0]
dr = r_v[1]-r_v[0]
(r, theta) = np.meshgrid(r_v, theta_v)
x = -1 + r * np.cos(theta)
y = -1 + r * np.sin(theta)
grid = CurvilinearZGrid(x, y)
u = np.ones(x.shape)
v = np.where(np.logical_and(theta > np.pi/4, theta < np.pi/3), 1, 0)
ufield = Field('U', u, grid=grid)
vfield = Field('V', v, grid=grid)
fieldset = FieldSet(ufield, vfield)
pset = pset_type[pset_mode]['pset'].from_field(fieldset, size=npart, pclass=ptype['scipy'], start_field=fieldset.V)
lons = np.array([p.lon+1 for p in pset])
lats = np.array([p.lat+1 for p in pset])
thetas = np.arctan2(lats, lons)
rs = np.sqrt(lons*lons + lats*lats)
test = np.pi/4-dtheta < thetas
test *= thetas < np.pi/3+dtheta
test *= rs > .25-dr
test *= rs < 2+dr
assert np.all(test)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_create_with_time(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart)
lat = np.linspace(1, 0, npart)
time = 5.
pset = pset_type[pset_mode]['pset'](fieldset, lon=lon, lat=lat, pclass=ptype[mode], time=time)
assert np.allclose([p.time for p in pset], time, rtol=1e-12)
pset = pset_type[pset_mode]['pset'].from_list(fieldset, lon=lon, lat=lat, pclass=ptype[mode],
time=[time]*npart)
assert np.allclose([p.time for p in pset], time, rtol=1e-12)
pset = pset_type[pset_mode]['pset'].from_line(fieldset, size=npart, start=(0, 1), finish=(1, 0),
pclass=ptype[mode], time=time)
assert np.allclose([p.time for p in pset], time, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_repeated_release(fieldset, pset_mode, mode, npart=10):
time = np.arange(0, npart, 1) # release 1 particle every second
pset = pset_type[pset_mode]['pset'](fieldset, lon=np.zeros(npart), lat=np.zeros(npart),
pclass=ptype[mode], time=time)
assert np.allclose([p.time for p in pset], time)
def IncrLon(particle, fieldset, time):
particle.lon += 1.
pset.execute(IncrLon, dt=1., runtime=npart)
assert np.allclose([p.lon for p in pset], np.arange(npart, 0, -1))
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_dt0(fieldset, pset_mode, mode, npart=10):
pset = pset_type[pset_mode]['pset'](fieldset, lon=np.zeros(npart), lat=np.zeros(npart),
pclass=ptype[mode])
def IncrLon(particle, fieldset, time):
particle.lon += 1
pset.execute(IncrLon, dt=0., runtime=npart)
assert np.allclose([p.lon for p in pset], 1.)
assert np.allclose([p.time for p in pset], 0.)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
def test_pset_repeatdt_check_dt(pset_mode, fieldset):
pset = pset_type[pset_mode]['pset'](fieldset, lon=[0], lat=[0], pclass=ScipyParticle, repeatdt=5)
def IncrLon(particle, fieldset, time):
particle.lon = 1.
pset.execute(IncrLon, dt=2, runtime=21)
assert np.allclose([p.lon for p in pset], 1) # if p.dt is nan, it won't be executed so p.lon will be 0
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_repeatdt_custominit(fieldset, pset_mode, mode):
class MyParticle(ptype[mode]):
sample_var = Variable('sample_var')
pset = pset_type[pset_mode]['pset'](fieldset, lon=0, lat=0, pclass=MyParticle, repeatdt=1, sample_var=5)
def DoNothing(particle, fieldset, time):
return StateCode.Success
pset.execute(DoNothing, dt=1, runtime=21)
assert np.allclose([p.sample_var for p in pset], 5.)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_stop_simulation(fieldset, pset_mode, mode):
pset = pset_type[pset_mode]['pset'](fieldset, lon=0, lat=0, pclass=ptype[mode])
def Delete(particle, fieldset, time):
if time == 4:
return OperationCode.StopExecution
pset.execute(Delete, dt=1, runtime=21)
assert pset[0].time == 4
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_access(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart, dtype=np.float32)
lat = np.linspace(1, 0, npart, dtype=np.float32)
pset = pset_type[pset_mode]['pset'](fieldset, lon=lon, lat=lat, pclass=ptype[mode])
assert(pset.size == 100)
assert np.allclose([pset[i].lon for i in range(pset.size)], lon, rtol=1e-12)
assert np.allclose([pset[i].lat for i in range(pset.size)], lat, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_custom_ptype(fieldset, pset_mode, mode, npart=100):
class TestParticle(ptype[mode]):
p = Variable('p', np.float32, initial=0.33)
n = Variable('n', np.int32, initial=2)
pset = pset_type[pset_mode]['pset'](fieldset, pclass=TestParticle,
lon=np.linspace(0, 1, npart),
lat=np.linspace(1, 0, npart))
assert(pset.size == npart)
assert np.allclose([p.p - 0.33 for p in pset], np.zeros(npart), atol=1e-5)
assert np.allclose([p.n - 2 for p in pset], np.zeros(npart), rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_add_explicit(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart)
lat = np.linspace(1, 0, npart)
pset = pset_type[pset_mode]['pset'](fieldset, lon=[], lat=[], pclass=ptype[mode], lonlatdepth_dtype=np.float64)
for i in range(npart):
particle = pset_type[pset_mode]['pset'](pclass=ptype[mode], lon=lon[i], lat=lat[i],
fieldset=fieldset, lonlatdepth_dtype=np.float64)
pset.add(particle)
assert(pset.size == 100)
assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_add_shorthand(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart, dtype=np.float32)
lat = np.linspace(1, 0, npart, dtype=np.float32)
pset = pset_type[pset_mode]['pset'](fieldset, lon=[], lat=[], pclass=ptype[mode])
for i in range(npart):
pset += pset_type[pset_mode]['pset'](pclass=ptype[mode], lon=lon[i], lat=lat[i], fieldset=fieldset)
assert(pset.size == 100)
assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_add_execute(fieldset, pset_mode, mode, npart=10):
def AddLat(particle, fieldset, time):
particle.lat += 0.1
pset = pset_type[pset_mode]['pset'](fieldset, lon=[], lat=[], pclass=ptype[mode])
for i in range(npart):
pset += pset_type[pset_mode]['pset'](pclass=ptype[mode], lon=0.1, lat=0.1, fieldset=fieldset)
for _ in range(3):
pset.execute(pset.Kernel(AddLat), runtime=1., dt=1.0)
assert np.allclose(np.array([p.lat for p in pset]), 0.4, rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_merge_inplace(fieldset, pset_mode, mode, npart=100):
pset1 = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.linspace(1, 0, npart))
pset2 = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.linspace(0, 1, npart))
assert(pset1.size == 100)
assert(pset2.size == 100)
pset1.add(pset2)
assert(pset1.size == 200)
@pytest.mark.xfail(reason="ParticleSet duplication has not been implemented yet")
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_merge_duplicate(fieldset, pset_mode, mode, npart=100):
pset1 = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.linspace(1, 0, npart))
pset2 = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.linspace(0, 1, npart))
pset3 = pset1 + pset2
assert(pset1.size == 100)
assert(pset2.size == 100)
assert(pset3.size == 200)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_remove_index(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart)
lat = np.linspace(1, 0, npart)
pset = pset_type[pset_mode]['pset'](fieldset, lon=lon, lat=lat, pclass=ptype[mode], lonlatdepth_dtype=np.float64)
for ilon, ilat in zip(lon[::-1], lat[::-1]):
assert(pset[-1].lon == ilon)
assert(pset[-1].lat == ilat)
pset.remove_indices(-1)
assert(pset.size == 0)
@pytest.mark.xfail(reason="Particle removal has not been implemented yet")
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_remove_particle(fieldset, pset_mode, mode, npart=100):
lon = np.linspace(0, 1, npart)
lat = np.linspace(1, 0, npart)
pset = pset_type[pset_mode]['pset'](fieldset, lon=lon, lat=lat, pclass=ptype[mode])
for ilon, ilat in zip(lon[::-1], lat[::-1]):
assert(pset.lon[-1] == ilon)
assert(pset.lat[-1] == ilat)
pset.remove_indices(pset[-1])
assert(pset.size == 0)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_remove_kernel(fieldset, pset_mode, mode, npart=100):
def DeleteKernel(particle, fieldset, time):
if particle.lon >= .4:
particle.delete()
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.linspace(1, 0, npart))
pset.execute(pset.Kernel(DeleteKernel), endtime=1., dt=1.0)
assert(pset.size == 40)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_multi_execute(fieldset, pset_mode, mode, npart=10, n=5):
def AddLat(particle, fieldset, time):
particle.lat += 0.1
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.zeros(npart))
k_add = pset.Kernel(AddLat)
for _ in range(n):
pset.execute(k_add, runtime=1., dt=1.0)
assert np.allclose([p.lat - n*0.1 for p in pset], np.zeros(npart), rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_pset_multi_execute_delete(fieldset, pset_mode, mode, npart=10, n=5):
def AddLat(particle, fieldset, time):
particle.lat += 0.1
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode],
lon=np.linspace(0, 1, npart),
lat=np.zeros(npart))
k_add = pset.Kernel(AddLat)
for _ in range(n):
pset.execute(k_add, runtime=1., dt=1.0)
pset.remove_indices(-1)
assert np.allclose([p.lat - n*0.1 for p in pset], np.zeros(npart - n), rtol=1e-12)
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('area_scale', [True, False])
def test_density(fieldset, pset_mode, mode, area_scale):
lons, lats = np.meshgrid(np.linspace(0.05, 0.95, 10), np.linspace(-30, 30, 20))
pset = pset_type[pset_mode]['pset'](fieldset, pclass=ptype[mode], lon=lons, lat=lats)
arr = pset.density(area_scale=area_scale)
if area_scale:
assert np.allclose(arr, 1 / fieldset.U.cell_areas(), rtol=1e-3) # check that density equals 1/area
else:
assert(np.sum(arr) == lons.size) # check conservation of particles
inds = np.where(arr)
for i in range(len(inds[0])): # check locations (low atol because of coarse grid)
assert np.allclose(fieldset.U.lon[inds[1][i]], pset[i].lon, atol=fieldset.U.lon[1]-fieldset.U.lon[0])
assert np.allclose(fieldset.U.lat[inds[0][i]], pset[i].lat, atol=fieldset.U.lat[1]-fieldset.U.lat[0])
@pytest.mark.parametrize('pset_mode', ['soa', 'aos'])
@pytest.mark.parametrize('staggered_grid', ['Agrid', 'Cgrid'])
def test_from_field_exact_val(pset_mode, staggered_grid):
xdim = 4
ydim = 3
lon = np.linspace(-1, 2, xdim, dtype=np.float32)
lat = np.linspace(50, 52, ydim, dtype=np.float32)
dimensions = {'lat': lat, 'lon': lon}
if staggered_grid == 'Agrid':
U = np.zeros((ydim, xdim), dtype=np.float32)
V = np.zeros((ydim, xdim), dtype=np.float32)
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
mask = np.array([[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]])
fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
FMask = Field('mask', mask, lon, lat)
fieldset.add_field(FMask)
elif staggered_grid == 'Cgrid':
U = np.array([[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0]])
V = np.array([[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 0]])
        data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
"""
@authors: <NAME>, <NAME>, <NAME>
DCE-MRI two-compartment filtration model fit
2021
"""
import numpy as np
import sys
from scipy import integrate
np.set_printoptions(threshold=sys.maxsize)
def aif_trapz(aif, time, timepoint, Hct):
""" This function computes the numerical integration for the AIF first and second pass.
Args
----
    aif (list): arterial input function.
    time (list): DCE dynamic timepoints.
timepoint (int): number of baseline acquisitions.
Hct (float): hematocrit.
Returns
-------
first_pass_aif_new (ndarray): first pass aif from composite trapezoidal rule.
second_pass_aif_new (ndarray): second pass aif from composite trapezoidal rule.
"""
aif0 = np.mean(aif[0:timepoint])
aif_new = (aif-aif0)/(1-Hct)
first_pass_aif_new = integrate.cumtrapz(aif_new,time)
first_pass_aif_new = np.insert(first_pass_aif_new,0,0)#add extra zero to make array back to 265
second_pass_aif_new = integrate.cumtrapz(first_pass_aif_new,time)
second_pass_aif_new = np.insert(second_pass_aif_new,0,0)#add extra zero to make array back to 265
return first_pass_aif_new, second_pass_aif_new
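# Minimal usage sketch for aif_trapz (hypothetical values, shown for clarity):
#   t = np.linspace(0.0, 400.0, 265)                    # acquisition times
#   aif = np.concatenate([np.zeros(15), np.ones(250)])  # flat post-baseline AIF
#   fp, sp = aif_trapz(aif, t, timepoint=15, Hct=0.45)
# Both outputs keep the input length because a leading zero is re-inserted
# after each cumulative trapezoidal integration.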
def Linear_Least_Squares_2CFM(images_to_be_fitted, time, timepoint, first_pass_aif_new, second_pass_aif_new, return_parameters=True):
""" Linear least squares 2-compartment filtration model fit.
Args
----
images_to_be_fitted (numpy.ndarray): input image at all time-series (i.e. at each DCE dynamic measurement) with shape [x-dim*y-dim, total time-series].
time (list): corresponding timepoints at each AIF.
timepoint (int): user-defined timepoint.
first_pass_aif_new (ndarray): first pass aif from composite trapezoidal rule
second_pass_aif_new (ndarray): second pass aif from composite trapezoidal rule
return_parameters (condition): User-defined condition to return paramter maps. Default is True. If False then empty parameter maps are returned.
Returns
-------
Sfit (numpy.ndarray): signal model fit at all time-series with shape [x-dim*y-dim, total time-series].
Fp (numpy.ndarray): fitted parameter 'Fp' with shape [x-dim*y-dim].
Tp (numpy.ndarray): fitted parameter 'Tp' with shape [x-dim*y-dim].
PS (numpy.ndarray): fitted parameter 'PS' with shape [x-dim*y-dim].
Te (numpy.ndarray): fit parameter 'Te' with shape [x-dim*y-dim].
"""
shape = np.shape(images_to_be_fitted)
S0 = np.empty(shape[0])
St = images_to_be_fitted # signal
Ct = np.empty(shape) #concentration
Sfit = np.empty(shape)
Cfit = np.empty(shape)
for x in range(shape[0]):#pixels
S0[x] = np.mean(St[x,0:timepoint]) # timepoint = 15 baselines only
Ct[x,:] = St[x,:]-S0[x]
time = np.tile(time, (shape[0],1)) # tile to repeat to match ct_new shape
first_pass_ct_new = integrate.cumtrapz(Ct,time)
first_pass_ct_new = np.insert(first_pass_ct_new,0,0, axis=1)#add extra zero to make array back to 265
second_pass_ct_new = integrate.cumtrapz(first_pass_ct_new,time)
second_pass_ct_new = np.insert(second_pass_ct_new,0,0, axis=1)#add extra zero to make array back to 265
X = np.empty([shape[0],4])
A = np.empty([265,4])
A[:,2] = second_pass_aif_new
A[:,3] = first_pass_aif_new
alpha = np.empty(shape[0])
beta = np.empty(shape[0])
gamma = np.empty(shape[0])
Fp = np.empty(shape[0])
for x in range(shape[0]):
A[:,0] = - second_pass_ct_new[x,:]
A[:,1] = - first_pass_ct_new[x,:]
X[x,:] = np.linalg.lstsq(A,Ct[x,:],rcond=None)[0]
Cfit[x,:] =X[x,0]*A[:,0] + X[x,1]*A[:,1] + X[x,2]*A[:,2] + X[x,3]*A[:,3]
Sfit[x,:] = S0[x]+Cfit[x,:]
alpha[x] = X[x,0]
beta[x] = X[x,1]
gamma[x] = X[x,2]
Fp[x] = X[x,3]
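    # Hedged note on the solve above: the 2CFM concentration model is linear in
    # the four coefficients,
    #   Ct = -alpha*iint(Ct) - beta*int(Ct) + gamma*iint(AIF) + Fp*int(AIF),
    # so each pixel's (alpha, beta, gamma, Fp) comes from a single lstsq solve
    # against the design matrix A built from the cumulative integrals.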
    if return_parameters:
        # alpha, beta, Te are per-pixel arrays, so the scalar-style tests of the
        # original ('alpha.all() == 0', 'if det < 0', 'if Te == 0') would be
        # wrong or raise for multi-element arrays; np.all() is used instead
        # (assumption: the intent is that the condition holds for every pixel).
        if np.all(alpha == 0): # TODO: conditions TBC with Steven
            Tp = 1/beta
            PS = np.zeros(shape[0])
            Te = np.zeros(shape[0])
        else:
            if np.all(alpha == 0) and np.all(beta == 0):
                # unreachable after the branch above; kept to preserve the
                # original control flow
                Fp = np.zeros(shape[0])
                Tp = np.zeros(shape[0])
                PS = np.zeros(shape[0])
                Te = np.zeros(shape[0])
            else:
                T = gamma/(alpha*Fp)
                det = np.square(beta)-4*alpha
                if np.all(det < 0):
                    Tp = beta/(2*alpha)
                    Te = beta/(2*alpha)
                else:
                    Tp = (beta - np.sqrt(np.square(beta)-4*alpha))/(2*alpha)
                    Te = (beta + np.sqrt(np.square(beta)-4*alpha))/(2*alpha)
                if np.all(Te == 0):
                    Fp = np.zeros(shape[0])
                    Tp = np.zeros(shape[0])
                    PS = np.zeros(shape[0])
import torch
from model import D, FrontEnd, G, Q
import numpy as np
from torch.autograd import Variable
from torchvision.utils import save_image
import os
has_cuda = torch.cuda.is_available()
model_path = './results/False/2019-01-16-15-57-41'
device = torch.device("cuda" if has_cuda else "cpu")
b_sz = 100
# torch.manual_seed(1)
def _noise_sample(dis_c, con_c, noise, bs):
idx = np.random.randint(10, size=bs)
c = np.zeros((bs, 10))
c[range(bs), idx] = 1.0
idx_2 = np.random.randint(2, size=bs)
c_2 = np.zeros((bs, 2))
c_2[range(bs), idx_2] = 1.0
# print('c_2: ', c_2)
dis_c.data.copy_(torch.Tensor(c))
con_c.data.copy_(torch.Tensor(c_2))
noise.data.uniform_(-1.0, 1.0)
print('noise: ', noise.shape)
z = torch.cat([noise, dis_c, con_c], 1).view(-1, 74, 1, 1)
return z, idx, idx_2
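# Note on the latent layout above: the 74-channel vector concatenates 62 uniform
# noise dims, a 10-way one-hot code (dis_c) and a 2-way one-hot code (con_c),
# matching the .view(-1, 74, 1, 1) reshape fed to the generator.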
model_Q = Q().to(device)
model_FE = FrontEnd().to(device)
model_G = G().to(device)
model_D = D().to(device)
model_Q.load_state_dict(torch.load(model_path + '/model_Q.pytorch', map_location='cpu'))
model_D.load_state_dict(torch.load(model_path + '/model_D.pytorch', map_location='cpu'))
model_FE.load_state_dict(torch.load(model_path + '/model_FE.pytorch', map_location='cpu'))
model_G.load_state_dict(torch.load(model_path + '/model_G.pytorch', map_location='cpu'))
model_Q.eval()
model_D.eval()
model_FE.eval()
model_G.eval()
dis_c = torch.FloatTensor(b_sz, 10).to(device)
con_c = torch.FloatTensor(b_sz, 2).to(device)
noise = torch.FloatTensor(b_sz, 62).to(device)
dis_c = Variable(dis_c)
con_c = Variable(con_c)
noise = Variable(noise)
dis_c.data.resize_(b_sz, 10)
con_c.data.resize_(b_sz, 2)
noise.data.resize_(b_sz, 62)
fix_noise = np.tile(np.linspace(-1, 1, num=5), 12)
fix_noise = torch.from_numpy(np.tile(np.concatenate([fix_noise, np.linspace(-1, 1, num=2)]), 100).reshape([-1, 62]))
# fix_noise = torch.Tensor(100, 62).uniform_(-1, 1)
# print(fix_noise.shape)
addons = np.linspace(-0.5, 0.5, num=10)
print('len(fix_noise): ', len(fix_noise))
for i in range(len(fix_noise)):
fix_noise[i] += addons[i % 10]
print(fix_noise)
fix_idx = np.arange(10).repeat(10)
one_hot = np.zeros((100, 10))
one_hot[range(100), fix_idx] = 1
noise.data.copy_(fix_noise)
dis_c.data.copy_(torch.Tensor(one_hot))
c = np.ones(shape=[b_sz, 1])
c1 = np.hstack([c, np.zeros_like(c)])  # one-hot [1, 0] for the 2-dim code; bracket closure reconstructed (assumption) from the dataset truncation
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 08 17:46:45 2017
@author: apfranco
"""
import numpy as np
import scipy
from scipy.optimize import leastsq
def RockPhysicsCalibration(agd, OM):
    # ALGORITHM FOR ROCK PHYSICS MODEL CALIBRATION
    #
    # MODELS
    # 1 - neutron porosity:
    # phi = A + B phiE + C vsh or
    # 2 - gamma rays:
    # gr = grmin + (grmax - grmin) vsh
    # 3 - density model:
    # rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh * (1 - phiE);
    # 4 - resistivity:
    # 1/ Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    #
    # GENERAL DESCRIPTION:
    # The program should be run to generate the coefficients and densities
    # described above, for use in later stages of inferring porosity, clay
    # volume and saturation. The program provides an option to enter known
    # stratigraphic limits, performing a general calibration for the whole
    # package and also in groups separated by shale volume relative to a
    # cutoff value (cutclay). The program offers 3 output options covering
    # calibration over the entire analyzed segment, over smaller segments
    # defined at input (secHoriz), or over those same smaller segments further
    # subdivided according to shale content.
    #
    # INPUT PARAMETERS:
    # log data - gamma rays, porosity, density, VP and VS
    # core data (if available) - clay volume, porosity, density
    # top, bot - upper and lower limits of the section to be analyzed
    # phiSand - porosity of homogeneous sand (zero clay content)
    # grmin, grmax - minimum and maximum values for converting gamma rays into shale volume
    # cutclay - threshold value for the sand-to-shale transition (grain- to matrix-supported)
    # secHoriz - matrix (nFac x 2) containing the upper and lower limits of each stratigraphic unit
    # satUncert - = 0 turns off the calibration selector for horizons with oil.
    # Otherwise iOut must necessarily equal 3
    # iOut - facies detail selector for the parameter output: 1, 2
    # or 3, as explained above.
    # modPhiC - selector of the calibration (effective) porosity type:
    # = 1 neutron porosity log; = 2 independent effective
    # porosity (e.g. core); = 3 effective porosity
    # computed with formula 1 above.
    # NOTE: CAUTION - option modPhiC = 3 needs refinement and should be used in
    # very specific cases only. In general it produces ill-conditioned matrices.
    #
    # OUTPUT PARAMETERS:
    # calibData_wellName - file containing the reference data for the calibration process
    # phiC
    # clayC
    # rhoC
    # resC
    # calibCPR_Vel_wellName - file containing the parameters of Han's linear velocity model
    # facies
    # phiSand
    # neutron
    # denLitho
    # cValuesPhi
    # cValuesChi
    # covMatrixPar
    # coefVP
    # coefVS
    # fluidProp
    # fluidPars
print ("CHAMANDO A FUNCAO EM ALGO")
#Parametros de entrada
inputPars = agd.get_input()
well_uid = agd.get_well_uid()
log_index = OM.list('log', well_uid)[0]
indexes = log_index.get_index()[0]
z = indexes[0].data
topCL = inputPars.get('topCL', None) #Intervalo para calibracao (com agua)
botCL = inputPars.get('botCL', None)
top = inputPars.get('top', None) #Intervalo para inferencia
bot = inputPars.get('bot', None)
indLog = np.argwhere(np.logical_and(z>=top, z<=bot))
indLog = np.squeeze(indLog,1)
    # Input of the pressure logs
press_file = np.loadtxt('U:/bkp_Windows06nov2017/Documents/Pocos_Morena/MA20.prs')
z = z[indLog]
gr = inputPars.get('gr', None )
gr = gr[indLog]
gr = logInterp(gr,z)
phi = inputPars.get('phi', None )
phi = phi[indLog]
phi = logInterp(phi,z)
rhoFull = inputPars.get('rho', None )
rho = rhoFull[indLog]
rho = logInterp(rho,z)
res = inputPars.get('res', None )
res = res[indLog]
    if np.all(np.isnan(res)):  # NaN != NaN, so the original 'res == np.NaN' comparison could never be True
res = np.empty(np.size(indLog))
else:
res = logInterp(res,z)
fac = inputPars.get('fac', None )
fac = fac[indLog]
fac = np.array(np.floor(fac), dtype=int)
fac = logInterp(fac,z)
    # Input of the pressure logs
zProv = indexes[0].data
mpp = 0.0980665*press_file[:,0]
mtzp = press_file[:,1]
lpres, cpres = np.shape(press_file)
if (cpres == 3):
mmzp = press_file[:,cpres - 1]
else:
mmzp = np.empty([0,0])
nDP = np.size(mtzp)
tvdss = inputPars.get('tvdss', None )
tvdss = tvdss[indLog]
izp = np.empty(nDP, dtype=int)
if (np.size(mmzp) == 0):
indr = indLog
lindr = np.size(indr) - 1
tol = 0.1
for i in range (0, nDP):
indp = np.argwhere(np.logical_and(tvdss <= (mtzp[i] + tol), tvdss >= (mtzp[i] - tol)))
indp= np.squeeze(indp,1)
cizp = np.argwhere(np.logical_and(indp >= indr[0], indp <= indr[lindr]))
cizp= np.squeeze(cizp,1)
if (np.size(cizp) == 0):
izp[i] = np.argmin(np.abs(tvdss - mtzp[i]))
else:
izp[i] = indp[cizp[0]]
mzp = zProv[izp]
matsort = np.concatenate([[mzp],[mpp], [mtzp],[izp]]).T
indsort = np.argsort(matsort[:,0],0)
matsort = np.array([[matsort[indsort,0]],[matsort[indsort,1]],[matsort[indsort,2]],[matsort[indsort,3]]]).T
matsort = np.squeeze(matsort)
mzp = matsort[:,0]
mpp = matsort[:,1]
mtzp = matsort[:,2]
izp = matsort[:,3].astype(int)
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
else:
mzp = mmzp
for i in range (0, nDP):
izp[i] = np.argmin(np.abs(zProv - mzp[i]))
zp = zProv[izp[0]:izp[nDP - 1] + 1]
rhopp = rhoFull[izp[0]:izp[nDP - 1] + 1]
rhopp = logInterp(rhopp, zp)
phiCore = np.empty([0,0])
secHoriz = np.array([top, bot])
    # Calibration/output parameters and data
    nFac = 4
    modPhiC = 1 # indicator of the calibration data type used as effective porosity
    # 1: neutron log 2: effective porosity log
    useCore = 0
    iOut = 2
    #iuseclay = 0 # indicator of the clay-content type to be used
    # 0: vsh straight from the log 1: clay (computed from GR)
    # Density parameters
    rhoMin = np.array([2.55, 2.569, 2.623, 2.707]) # there are 4 facies in the reported region
    # Resistivity parameters
    mP = 2.0 # cementation exponent in clean sands: 1.3 (unconsolidated) - 2.0 (consolidated)
    nS = 2.0 # saturation exponent in clean sands, 1.5 - 2.0.
    # It is reduced in the presence of lamination and microporosity
    aT = 0.8 # constant of Archie's equation
    Rw = 0.028 # water resistivity
    Rsh = 2.048 # shale resistivity
resCoef = np.array([[mP, nS, aT*Rw, Rsh], [1.5, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh], [2.0, nS, aT*Rw, Rsh]])
    # Section on fluid properties and sand/shale matrices
    # Parameters
    # pressure calculation
    pres_poros = np.mean(mpp) # reference pore pressure for the density calculation
    temp = 89.0 # temperature, oC
    sal = 102400 # salinity
    RGO = 75.0 # gas-oil ratio
    API = 29.0 # API gravity
    G = 0.835 # specific gravity
    # Order the parameters in the vector for the function call
    fluidPars = np.array([pres_poros, temp, sal, RGO, API, G])
    # THE CODE secCalibVshPhiRhoRes_vpHan STARTS HERE
    # Calibration stretch
    indCL = np.where(np.logical_and(z>=topCL, z<=botCL))
    nData = np.size(z)
    # Computation of effective porosity and vsh, estimating the grmin and
    # grmax values over the whole package covered by the data
    # Transformation of the observed data
    # Shale volume from gamma rays
indSh = np.argwhere(fac==4)
indSh= np.squeeze(indSh,1)
indSd = np.argwhere(fac == 1)
indSd= np.squeeze(indSd,1)
if (np.size(indSh) == 0 and np.size(indSd) == 0):
grmax = np.percentile(gr, 95)
grmin = np.percentile(gr, 5)
else:
grmax = np.percentile(gr[indSh], 95) #146.3745
grmin = np.percentile(gr[indSd], 5) #54.2600
claye = vshGRcalc(gr, grmin, grmax)
    # For now using only modPhiC == 1
if modPhiC == 1:
grlim = grmax
ind = np.where (gr>= grlim)
phiNsh = np.median(phi[ind])
phiEe = np.fmax(0.01, phi - claye*phiNsh)
modPhiC =2
elif (modPhiC == 2 and np.size(phiCore) == 0):
print ("Nao existe a funcao chamada aqui dentro")
#phiEe = phiSd2phiE (zR, claye, phiSand, secHoriz)
elif (modPhiC == 2 and useCore == 1 ):
phiEe = phiCore
    # fluidProp: matrix with Kf and density values for the brine, oil and gas
    # phases, ordered as follows:
    # bulk_brine, bulk_oil, bulk_gas (modulus varying with pressure)
    # rho_brine, rho_oil, rho_gas (only the density will be fixed)
    nDP = np.size(mpp)
    fluidPropP = np.empty([nDP, 2, 3]) # skeleton of nDP 'pages' that will hold
    # the 2x3 matrices returned by the seismicPropFluids function
    for i in np.arange(0, nDP):
        # update pore pressure
fluidPars[0] = mpp[i]
fluidPropP[i] = seismicPropFluids(fluidPars)
fluidProp = np.mean(fluidPropP, 0)
rhoFluids = fluidProp[1]
rhoW = rhoFluids[0]
rhoO = rhoFluids[1]
    # rock physics model calibration
    # selection of logs only in the water-bearing calibration region
phiC = phiEe[indCL]
clayC = claye[indCL]
rhoCL = rho[indCL]
resCL = res[indCL]
phiCL = phi[indCL]
facCL = fac[indCL]
    # Calibration for the whole section
    rhoMin_T = np.median(rhoMin)
opt = 2
if (opt == 1):
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes(phiCL, rhoCL, resCL, clayC, phiC, rhoMin_T, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi_T, phiMod_T, cRho_T, rhoMod_T, cRes_T, resMod_T] = calibClayPhiRhoRes2(phiCL, rhoCL, resCL, clayC, phiC , rhoW, resCoef, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar_T = np.concatenate([[cPhi_T[0]], [cPhi_T[1]], [cPhi_T[2]]])
denPar_T = np.concatenate([[rhoSd], [rhoWe], [rhoO], [rhoSh], [rhoDisp]])
resPar_T = cRes_T
[phiMod_T, rhoMod_T, resMod_T] = calibCPRRreMod(phiEe, claye, phiPar_T , denPar_T, resPar_T, modPhiC)
facies_T = np.ones((nData,1))
phiMod = np.zeros((nData,1))
rhoMod = np.zeros((nData,1))
resMod = np.zeros((nData,1))
phiPar = np.empty([nFac,3])
denPar = np.empty([nFac,5])
resPar = np.empty([nFac,4])
facH = np.zeros([np.size(facCL),1])
for i in range(0,nFac):
ind = np.argwhere(facCL == i + 1)
ind= np.squeeze(ind,1)
secPhi = phiCL[ind]
secRho = rhoCL[ind]
secRes = resCL[ind]
secClayC = clayC[ind]
secPhiC = phiC[ind]
#[cHan,vpMod(ind),s2] = calibHan(secVP,secPhiC,secClayC);
#coefHanVP(i,:) = cHan';
        # the neutron porosity and density part does not use separate, distinct
        # calibration for groupings in terms of shale volume. The coefficients
        # are repeated (equal) for sand and shale
resCoef_line = np.empty((resCoef.shape[0],1))
resCoef_line[:,0] = resCoef[i]
if (opt == 1):
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes(secPhi, secRho, secRes, secClayC, secPhiC , rhoMin[i], resCoef_line, modPhiC)
rhoSd = cRho_T[0]
rhoWe = cRho_T[1]
rhoSh = cRho_T[2]
rhoDisp = cRho_T[2]
else:
[cPhi, dataPhi, cRho, dataRho, cRes, dataRes] = calibClayPhiRhoRes2(secPhi, secRho, secRes, secClayC, secPhiC , rhoW, resCoef_line, modPhiC)
rhoSd = cRho_T[0]
rhoWe = rhoW
rhoSh = cRho_T[1]
rhoDisp = cRho_T[1]
phiPar[i] = np.array([cPhi[0], cPhi[1], cPhi[2]])
denPar[i] = np.array([rhoSd, rhoWe, rhoO, rhoSh, rhoDisp])
resPar[i] = cRes
facH[ind] = i + 1
resPar_line = np.empty([1,nFac])
resPar_line[0,:] = resPar[i]
ind = np.argwhere(fac == i + 1)
ind= np.squeeze(ind,1)
passArg = np.array([rhoSd, rhoW, rhoSh])
[dataPhi, dataRho, dataRes] = calibCPRRreMod(phiEe[ind], claye[ind], phiPar[i],passArg, resPar_line, modPhiC)
phiMod[ind,0] = dataPhi
rhoMod[ind,0] = dataRho
resMod[ind] = dataRes
if (iOut == 1):
nOutFac = 1
facies = facies_T
neutron = phiPar_T
denLitho = denPar_T
rhoComp = rhoMod_T
phiComp = phiMod_T
resComp = resMod_T
elif (iOut == 2):
nOutFac = np.ones([nFac,1])
facies = facH
neutron = phiPar
denLitho = denPar
denLitho[:,4] = neutron[:,2]
rhoComp = rhoMod
phiComp = phiMod
resComp = resMod
else:
        raise Exception('Output selector must be 1 or 2')
r2Phi = rsquared (phiComp, phi)
r2Rho = rsquared (rhoComp, rho)
r2Res = rsquared (resComp, res)
print ("Fim da calibracao, com seguintes ajustes R2:\n Phi = %7.2f\n RHO = %7.2f\n RES = %7.2f\n" % (r2Phi, r2Rho, r2Res))
#Saida de Dados
def calibClayPhiRhoRes(phi, rho, Rt, vsh, phiE, rhoMin, RtCoef, mode):
""" FINALIDADE: calcular parametros dos modelos de porosidade e densidade
a partir do ajuste dos dados de perfis de porosidade de neutrons e de
densidade, usando informacoes de volume de folhelho e de porosidade efetiva
com 3 opcoes distintas para a porosidade efetiva:
1 - usa o proprio perfil de neutrons como porosidade efetiva (identidade)
2 - usa um perfil independente de porosidade efetiva (ex. testemunho)
ENTRADA:
phi - perfil de neutrons
rho - perfil de densidade
vsh - volume de folhelho (normalmente extraido do perfil de raios gama)
phiE - perfil de porosidade efetiva
rhoMin - densidade media dos graos minerais constituintes da matriz da rocha
RtCoef -
mode - indicador de porosidade efetiva, sendo 1, 2 ou 3 conforme os
casos acima descritos.
SAIDA:
phiPar - parametros de ajuste do modelo de porosidade de neutrons
phiComp - perfil calculado de porosidade de neutrons
rhoPar - parametros de ajuste do modelo de densidade
rhoComp - perfil calculado de densidade
MODELOS
porosidade de neutrons:
phi = A + 1.0 phiE + C vsh
modelo de densidade:
rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
modelo de resistividade:
Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
"""
    if((mode != 1) and (mode != 2) and (mode != 3)):
        raise Exception("Input effective porosity selector must be 1 or 2!")
    n = np.size(vsh)
    if (np.size(phi) != n or np.size(rho) != n):
        raise Exception("Input vectors must have the same dimensions")
    if ((mode == 1 or mode == 2) and np.size(phiE) != n):  # parentheses added (assumption): 'or' binds looser than 'and'
        raise Exception("Effective porosity input vector does not have the appropriate dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
rhoPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity, per the
        # model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        #in this case phiE is an effective porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode ==3):
phiSand = 0.25
        #in this case phiE is an effective porosity vector
col1 = 1 - (phiSand + vsh)
col2 = np.ones(n)*phiSand
A = np.concatenate([[col1], [col2], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        #model parameters for the neutron porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
vecConc = vsh*(1-phiE)
B = np.concatenate([[phiE], [vecConc]])
xRho1 = fitNorm1(B, (rho - rhoMin), 10)
rhoPar[0] = rhoMin
rhoPar[1] = xRho1[0] + rhoMin
rhoPar[2] = xRho1[1] + rhoMin
rhoComp = np.dot(B,xRho1) + rhoMin
    xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0] # check how this behaves without lb and ub (leastsq takes no bounds)
RtPar = np.concatenate([cRes, xRes])
RtPar = RtPar.reshape(1, RtPar.size)
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
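# Worked numeric check (hedged, illustrative values only) of the Simandoux-type
# resistivity model used above, 1/Rt = phiE**m * Sw**n / (a*Rw*(1-chi)) + chi*Sw/Rsh:
# for a clean water-bearing sand (Sw = 1, chi = 0) with phiE = 0.25, m = 2.0 and
# a*Rw = 0.8*0.028, Rt = 1.0/(0.25**2/0.0224) ~ 0.36 ohm.m.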
def calibClayPhiRhoRes2(phi, rho, Rt, vsh, phiE, rhoWater, RtCoef, mode):
""" FINALIDADE: calcular parametros dos modelos de porosidade e densidade
a partir do ajuste dos dados de perfis de porosidade de neutrons e de
densidade, usando informacoes de volume de folhelho e de porosidade efetiva
com 3 opcoes distintas para a porosidade efetiva:
1 - usa o proprio perfil de neutrons como porosidade efetiva (identidade)
2 - usa um perfil independente de porosidade efetiva (ex. testemunho)
ENTRADA:
phi - perfil de neutrons
rho - perfil de densidade
vsh - volume de folhelho (normalmente extraido do perfil de raios gama)
phiE - perfil de porosidade efetiva
rhoWater - densidade da agua
RtCoef -
mode - indicador de porosidade efetiva, sendo 1, 2 ou 3 conforme os
casos acima descritos.
SAIDA:
phiPar - parametros de ajuste do modelo de porosidade de neutrons
phiComp - perfil calculado de porosidade de neutrons
rhoPar - parametros de ajuste do modelo de densidade
rhoComp - perfil calculado de densidade
MODELOS
porosidade de neutrons:
phi = A + 1.0 phiE + C vsh
modelo de densidade:
rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
modelo de resistividade:
Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
"""
    if((mode != 1) and (mode != 2) and (mode != 3)):
        raise Exception("Input effective porosity selector must be 1 or 2!")
    n = np.size(vsh)
    if (np.size(phi) != n or np.size(rho) != n):
        raise Exception("Input vectors must have the same dimensions")
    if ((mode == 1 or mode == 2) and np.size(phiE) != n):  # parentheses added (assumption): 'or' binds looser than 'and'
        raise Exception("Effective porosity input vector does not have the appropriate dimension")
options = np.empty([0,0])
lb = np.array([0.0, 0.5])
ub = np.array([0.5, 4.0])
x0 = RtCoef[2:4,0]
cRes = RtCoef[0:2,0]
phiPar = np.empty(3)
if (mode == 1):
        # the neutron log itself provides the effective porosity, per the
        # model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
elif (mode == 2):
        #in this case phiE is an effective porosity vector
col1 = 1 - (phiE + vsh)
A = np.concatenate([[col1], [phiE], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        # model parameters for the neutron porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
elif (mode ==3):
phiSand = 0.25
        #in this case phiE is an effective porosity vector
col1 = 1 - (phiSand + vsh)
col2 = np.ones(n)*phiSand
A = np.concatenate([[col1], [col2], [vsh]]).T
xPhi2 = fitNorm1(A,phi,10)
        #model parameters for the neutron porosity fit
phiPar[0] = xPhi2[0]
phiPar[1] = xPhi2[1]
phiPar[2] = xPhi2[2]
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1-phiE)
B = np.concatenate([[col1], [col2]]).T
rhoCte = rhoWater * phiE
xRho = fitNorm1(B, (rho - rhoCte),10)
rhoPar = np.empty(2)
rhoPar[0] = xRho[0]
rhoPar[1] = xRho[1]
rhoComp = np.dot(B, xRho) + rhoCte
xRes = scipy.optimize.leastsq(ofSimandouxPhiChiSw100, x0, args=(Rt, cRes, phiE, vsh))[0]
print ("VALORES DE xRES", xRes)
RtPar = np.concatenate([cRes, xRes])
RtPar = np.reshape(RtPar,(1,np.size(RtPar)))
facies = np.ones((n,1))
RtComp = dCompSimandouxPhiChiSw100(phiE,vsh,facies,RtPar)
return phiPar, phiComp, rhoPar, rhoComp, RtPar, RtComp
def calibCPRRreMod(phiE, vsh, phiPar, rhoPar, RtPar, mode):
    # PURPOSE: compute the modeled data using the models calibrated in another
    # interval of the well, following the 3 distinct options for the effective
    # porosity:
    # 1 - use the neutron log itself as the effective porosity (identity)
    # 2 - use an independent effective porosity log (e.g. core data)
    #
    # INPUT:
    # phi - neutron log
    # rho - density log
    # vsh - shale volume (normally extracted from the gamma-ray log)
    # phiE - effective porosity log
    # phiPar
    # rhoPar - water density
    # RtPar -
    # mode - effective porosity indicator, being 1, 2 or 3 according to the
    # cases described above.
    #
    # OUTPUT:
    # phiComp - computed neutron porosity log
    # rhoComp - computed density log
    # RtComp
    #
    #
    # MODELS
    # neutron porosity:
    # phi = A + 1.0 phiE + C vsh
    # density model:
    # rho = rhoq + (rhof-rhoq) * phiE + (rhoc-rhoq) * vsh;
    # resistivity model:
    # Rt = ( phiE**m Sw**n ) / ( a Rw (1-chi) ) + ( chi Sw ) / Rsh
    if mode not in (1, 2, 3):
        raise Exception("Effective porosity mode selector must be 1, 2 or 3")
n = np.size(vsh)
    if (mode == 1 or mode == 2) and np.size(phiE) != n:
        raise Exception("Effective porosity input vector has the wrong length")
if (mode == 1):
        # the neutron log itself provides the effective porosity,
        # following the model phiN = phiE
phiPar = np.array([0.0, 1.0, 0.0])
phiComp = phiE
    elif (mode == 2):
        # here phiE is an independent effective-porosity vector
        col1 = 1 - (phiE + vsh)
phiComp = col1 * phiPar[0] + phiE * phiPar[1] + vsh * phiPar[2]
    elif (mode == 3):
        phiSand = 0.25
        # here a fixed sand porosity (phiSand) is used
        # TODO: verify the use of mode 3; it may be better to reject it
        # in the mode check above
        col1 = 1 - (phiSand + vsh)
        col2 = np.ones(n) * phiSand
        phiComp = col1 * phiPar[0] + col2 * phiPar[1] + vsh * phiPar[2]
col2 = vsh*(1-phiE)
col1 = (1-vsh)*(1 - phiE)
B = np.concatenate([[col1], [col2]])
rhoCte = rhoPar[1]*phiE
rhoComp = col1 * rhoPar[0] + col2*rhoPar[2] + rhoCte
    facies = np.ones((n, 1))
    RtComp = dCompSimandouxPhiChiSw100(phiE, vsh, facies, RtPar)
    return phiComp, rhoComp, RtComp
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
import os
import nibabel as nib
import numpy as np
from tqdm import tqdm
import glob
import argparse
parser = argparse.ArgumentParser(
description="Convert Decathlon raw Nifti data "
"(http://medicaldecathlon.com/) "
"files to Numpy data files",
add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--data_path",
default="../../data/decathlon/Task01_BrainTumour/",
help="Path to the raw BraTS datafiles")
parser.add_argument("--save_path",
default="../../data/decathlon/",
help="Folder to save Numpy data files")
parser.add_argument("--resize", type=int, default=128,
help="Resize height and width to this size. "
"Original size = 240")
parser.add_argument("--split", type=float, default=0.85,
help="Train/test split ratio")
args = parser.parse_args()
def crop_center(img, cropx, cropy, cropz):
"""
Take a center crop of the images.
If we are using a 2D model, then we'll just stack the
z dimension so we can take all slices in that case
rather than just the crop.
"""
if len(img.shape) == 4:
x, y, z, c = img.shape
else:
x, y, z = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
startz = z//2-(cropz//2)
if len(img.shape) == 4:
# return img[startx:startx+cropx,starty:starty+cropy,startz:startz+cropz,:]
return img[startx:startx+cropx, starty:starty+cropy, :, :]
else:
# return img[startx:startx+cropx,starty:starty+cropy,startz:startz+cropz]
return img[startx:startx+cropx, starty:starty+cropy, :]
def normalize_img(img):
"""
Normalize the pixel values.
This is one of the most important preprocessing steps.
We need to make sure that the pixel values have a mean of 0
    and a standard deviation of 1 to help the model train
faster and more accurately.
"""
for channel in range(img.shape[3]):
img[:, :, :, channel] = (
img[:, :, :, channel] - np.mean(img[:, :, :, channel])) \
/ np.std(img[:, :, :, channel])
return img
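# A quick, self-contained sanity check of the two helpers above (a
# hedged sketch; the sizes and random data are illustrative, not from
# the original script):
def _demo_crop_and_normalize():
    rng = np.random.default_rng(0)
    vol = rng.normal(loc=5.0, scale=2.0, size=(24, 24, 10, 4))
    cropped = crop_center(vol, 16, 16, 16)
    assert cropped.shape == (16, 16, 10, 4)   # z and c are kept whole
    normed = normalize_img(cropped.copy())
    for channel in range(normed.shape[3]):
        assert abs(normed[..., channel].mean()) < 1e-9
        assert abs(normed[..., channel].std() - 1.0) < 1e-9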
def convert_raw_data_to_numpy(trainList, testList, imgList, save_dir):
# Save training set images
print("Step 1 of 4. Save training images.")
first = True
for idx in tqdm(trainList):
# nibabel loads images as X,Y,Z,C (HWDC)
img = np.array(nib.load(imgList[idx]).dataobj)
img = crop_center(img, args.resize, args.resize, args.resize)
img = normalize_img(img)
if first:
imgsArray = img
first = False
else:
imgsArray = np.concatenate([imgsArray, img], axis=2)
np.save(os.path.join(save_dir, "imgs_train.npy"),
np.swapaxes(imgsArray, 0, -2))
del imgsArray
# Save testing set images
print("Step 2 of 4. Save testing images.")
first = True
for idx in tqdm(testList):
img = np.array(nib.load(imgList[idx]).dataobj)
img = crop_center(img, args.resize, args.resize, args.resize)
img = normalize_img(img)
if first:
imgsArray = img
first = False
else:
imgsArray = np.concatenate([imgsArray, img], axis=2)
np.save(os.path.join(save_dir, "imgs_test.npy"),
np.swapaxes(imgsArray, 0, -2))
del imgsArray
# Save training set masks
print("Step 3 of 4. Save training masks.")
first = True
for idx in tqdm(trainList):
msk = np.array(nib.load(mskList[idx]).dataobj)
msk = crop_center(msk, args.resize, args.resize, args.resize)
msk[msk > 1] = 1 # Combine all masks
msk = np.expand_dims(msk, -1)
if first:
msksArray = msk
first = False
else:
msksArray = np.concatenate([msksArray, msk], axis=2)
np.save(os.path.join(save_dir, "msks_train.npy"),
np.swapaxes(msksArray, 0, -2))
del msksArray
# Save testing set masks
print("Step 4 of 4. Save testing masks.")
first = True
for idx in tqdm(testList):
msk = np.array(nib.load(mskList[idx]).dataobj)
msk = crop_center(msk, args.resize, args.resize, args.resize)
msk[msk > 1] = 1 # Combine all masks
msk = np.expand_dims(msk, -1)
if first:
msksArray = msk
first = False
else:
msksArray = np.concatenate([msksArray, msk], axis=2)
np.save(os.path.join(save_dir, "msks_test.npy"),
np.swapaxes(msksArray, 0, -2))
del msksArray
print("Finished processing.")
print("Numpy arrays saved to {}".format(save_dir))
if __name__ == "__main__":
print("Converting Decathlon raw Nifti data files to training and testing"
" Numpy data files.")
print(args)
save_dir = os.path.join(
args.save_path, "{}x{}/".format(args.resize, args.resize))
# Create directory
try:
os.makedirs(save_dir)
except OSError:
if not os.path.isdir(save_dir):
raise
# Check for existing numpy train/test files
check_dir = os.listdir(save_dir)
for item in check_dir:
if item.endswith(".npy"):
os.remove(os.path.join(save_dir, item))
print("Removed old version of {}".format(item))
"""
Get the training file names from the data directory.
Anything ending in .nii.gz in the imagesTr subdirectory
is a training file.
"""
imgList = glob.glob(os.path.join(args.data_path, "imagesTr", "*.nii.gz"))
mskList = [w.replace("imagesTr", "labelsTr") for w in imgList]
"""
Randomize the file list. Then separate into training and
validation (testing) lists.
"""
numFiles = len(imgList)
# Set the random seed so that always get same random mix
np.random.seed(816)
idxList = np.arange(numFiles) # List of file indices
np.random.shuffle(idxList) # Randomize the file list
    # np.int was removed in NumPy 1.24; the builtin int behaves identically here
    trainList = idxList[:int(numFiles * args.split)]
    testList = idxList[int(numFiles * args.split):]
    convert_raw_data_to_numpy(trainList, testList, imgList, save_dir)
"""
Project: RadarBook
File: ovals_of_cassini_example.py
Created by: <NAME>
On: 7/2/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from Chapter04.ui.OvalsOfCassini_ui import Ui_MainWindow
from numpy import linspace, log10, sqrt, sin, cos, imag, real
from scipy.constants import c, Boltzmann as k, pi
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.qt_compat import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class OvalsOfCassini(QMainWindow, Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
# Connect to the input boxes, when the user presses enter the form updates
self.separation_distance.returnPressed.connect(self._update_canvas)
self.system_temperature.returnPressed.connect(self._update_canvas)
self.bandwidth.returnPressed.connect(self._update_canvas)
self.noise_figure.returnPressed.connect(self._update_canvas)
self.transmit_losses.returnPressed.connect(self._update_canvas)
self.receive_losses.returnPressed.connect(self._update_canvas)
self.peak_power.returnPressed.connect(self._update_canvas)
self.transmit_antenna_gain.returnPressed.connect(self._update_canvas)
self.receive_antenna_gain.returnPressed.connect(self._update_canvas)
self.frequency.returnPressed.connect(self._update_canvas)
self.bistatic_target_rcs.returnPressed.connect(self._update_canvas)
# Set up a figure for the plotting canvas
fig = Figure()
self.axes1 = fig.add_subplot(111)
self.my_canvas = FigureCanvas(fig)
# Add the canvas to the vertical layout
self.verticalLayout.addWidget(self.my_canvas)
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))
# Update the canvas for the first display
self._update_canvas()
def _update_canvas(self):
"""
Update the figure when the user changes an input value.
:return:
"""
# Get the values from the form
separation_distance = float(self.separation_distance.text())
system_temperature = float(self.system_temperature.text())
bandwidth = float(self.bandwidth.text())
noise_figure = float(self.noise_figure.text())
transmit_losses = float(self.transmit_losses.text())
receive_losses = float(self.receive_losses.text())
peak_power = float(self.peak_power.text())
transmit_antenna_gain = float(self.transmit_antenna_gain.text())
receive_antenna_gain = float(self.receive_antenna_gain.text())
frequency = float(self.frequency.text())
bistatic_target_rcs = float(self.bistatic_target_rcs.text())
# Number of points for plotting ovals
number_of_points = 100000
# Parameters for the Cassini ovals equation
        # r^4 + a^4 - 2 a^2 r^2 cos(2 theta) = b^4
# Parameter "a"
a = 0.5 * separation_distance
# Calculate the wavelength (m)
wavelength = c / frequency
# Calculate the bistatic radar range factor
bistatic_range_factor = (peak_power * transmit_antenna_gain * receive_antenna_gain * wavelength ** 2 *
10.0 ** (bistatic_target_rcs / 10.0)) / ((4.0 * pi) ** 3 * k * system_temperature *
bandwidth * 10.0 ** (noise_figure / 10.0)
* transmit_losses * receive_losses)
# Full angle sweep
t = linspace(0, 2.0 * pi, number_of_points)
# Calculate the signal to noise ratio at which a = b
SNR_0 = 10.0 * log10(16.0 * bistatic_range_factor / separation_distance ** 4)
# Create the list of signal to noise ratios to plot
SNR = [SNR_0 - 6, SNR_0 - 3, SNR_0, SNR_0 + 3]
# Clear the axes for the updated plot
self.axes1.clear()
# Loop over all the desired signal to noise ratios
for s in SNR:
# Convert to linear units
snr = 10.0 ** (s / 10.0)
# Parameter for Cassini ovals
b = (bistatic_range_factor / snr) ** 0.25
if a > b:
# Calculate the +/- curves
r1 = sqrt(a ** 2 * (cos(2.0 * t) + sqrt(cos(2 * t) ** 2 - 1.0 + (b / a) ** 4)))
                r2 = sqrt(a ** 2 * (cos(2.0 * t) - sqrt(cos(2 * t) ** 2 - 1.0 + (b / a) ** 4)))
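                # Hedged continuation (the usual plotting pattern for
                # these examples; not verbatim from the source): with
                # a > b the curve splits into two lobes traced by the
                # +/- radii; angles with a negative radicand give NaN
                # and are simply not drawn by matplotlib.
                self.axes1.plot(r1 * cos(t), r1 * sin(t), 'b')
                self.axes1.plot(r2 * cos(t), r2 * sin(t), 'b')
            else:
                # Single closed oval when a <= b:
                # r^2 = a^2 cos(2 theta) + sqrt(b^4 - a^4 sin(2 theta)^2)
                r = sqrt(a ** 2 * cos(2.0 * t)
                         + sqrt(b ** 4 - a ** 4 * sin(2.0 * t) ** 2))
                self.axes1.plot(r * cos(t), r * sin(t), 'b')
        # Assumed finishing steps: label the axes and refresh the canvas
        self.axes1.set_xlabel('x (m)')
        self.axes1.set_ylabel('y (m)')
        self.axes1.set_aspect('equal')
        self.my_canvas.draw()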
import unittest
import numpy as np
from l5kit.kinematic import fit_ackerman_model_approximate, fit_ackerman_model_exact
class TestAckermanSteeringModel(unittest.TestCase):
def test_fit_ackerman_steering(self) -> None:
# These are only smoke tests for now, to be improved
        test_trajectory = np.array(
            [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
from enum import Enum
from ProGED.generators.base_generator import BaseExpressionGenerator
from ProGED.equation_discoverer import EqDisco
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
class SymType(Enum):
Var = 1
Const = 2
Operator = 3
Fun = 4
universal_symbols = [{"symbol": 'x1', "type": SymType.Var, "precedence": 5},
{"symbol": 'x2', "type": SymType.Var, "precedence": 5},
{"symbol": 'x3', "type": SymType.Var, "precedence": 5},
{"symbol": 'x4', "type": SymType.Var, "precedence": 5},
{"symbol": 'x5', "type": SymType.Var, "precedence": 5},
{"symbol": 'C', "type": SymType.Const, "precedence": 5},
{"symbol": '+', "type": SymType.Operator, "precedence": 0},
{"symbol": '-', "type": SymType.Operator, "precedence": 0},
{"symbol": '*', "type": SymType.Operator, "precedence": 1},
{"symbol": '/', "type": SymType.Operator, "precedence": 1},
{"symbol": '^', "type": SymType.Operator, "precedence": 2},
{"symbol": 'sin', "type": SymType.Fun, "precedence": 5},
{"symbol": 'cos', "type": SymType.Fun, "precedence": 5},
{"symbol": 'sqrt', "type": SymType.Fun, "precedence": 5},
{"symbol": 'exp', "type": SymType.Fun, "precedence": 5}]
class GeneratorHVAE(BaseExpressionGenerator):
def __init__(self, model, variables, symbols):
self.generator_type = "HVAE"
self.decoding_dict = symbols
self.precedence = {t["symbol"]: t["precedence"] for t in symbols}
self.constant = [t["symbol"] for t in symbols if t["type"]==SymType.Const][0]
self.variables = variables
if isinstance(model, str):
self.model = torch.load(model)
self.model.eval()
else:
self.model = model
self.input_mean = torch.zeros(1, 1, next(self.model.decoder.parameters()).size(0))
@staticmethod
def train_and_init(equations, variables, symbols, representation_size=64, hidden_size=64, batch_size=32, epochs=20,
verbose=True, model_path=None):
s_for_tokenization = {t["symbol"]: t for i, t in enumerate(symbols)}
trees = [tokens_to_tree(e, s_for_tokenization) for e in equations]
model = HVAE(len(symbols), hidden_size, representation_size)
dataset = TreeDataset(symbols, train=trees, test=[])
sampler = TreeSampler(batch_size, len(dataset))
def collate_fn(batch):
return batch
trainloader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn, num_workers=0)
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.CrossEntropyLoss()
iter_counter = 0
lmbda = (np.tanh(-4.5) + 1) / 2
midpoint = len(dataset) // (2 * batch_size)
for epoch in range(epochs):
total = 0
num_trees = 0
            with tqdm(total=len(dataset), desc=f'Training - Epoch: {epoch + 1}/{epochs}', unit='chunks') as prog_bar:
for i, trees in enumerate(trainloader):
total_loss = 0
for t in trees:
mu, logvar, outputs = model(t)
loss, b, k = outputs.loss(mu, logvar, lmbda, criterion)
total_loss += loss
total += loss.detach().item()
num_trees += batch_size
optimizer.zero_grad()
total_loss = total_loss / batch_size
total_loss.backward()
optimizer.step()
prog_bar.set_postfix(**{'run:': "HVAE",
'loss': total / num_trees,
'BCE': b.item(),
'KLD': k.item()})
prog_bar.update(batch_size)
iter_counter += 1
if iter_counter < 2400:
lmbda = (np.tanh((iter_counter - 4500) / 1000) + 1) / 2
if verbose and i == midpoint:
z = model.encode(trees[0])[0]
decoded_tree = model.decode(z, symbols)
print("\nO: {}".format(str(trees[0])))
print("P: {}".format(str(decoded_tree)))
for t in trees:
t.clear_prediction()
if model_path is not None:
torch.save(model, model_path)
return GeneratorHVAE(model_path, variables, symbols)
else:
return GeneratorHVAE(model, variables, symbols)
def generate_one(self):
inp = torch.normal(self.input_mean)
tree = self.model.decode(inp, self.decoding_dict)
# print(str(tree))
tree.change_redundant_variables(self.variables, self.constant)
return tree.to_list(with_precedence=True, precedence=self.precedence), 0, str(inp.tolist())
def decode_latent(self, latent):
tree = self.model.decode(latent, self.decoding_dict)
tree.change_redundant_variables(self.variables, self.constant)
return tree.to_list(with_precedence=True, precedence=self.precedence)
class Node:
def __init__(self, symbol=None, right=None, left=None, target=None):
self.symbol = symbol
self.right = right
self.left = left
self.target = target
self.prediction = None
def __str__(self):
if self.left is None and self.right is None:
return self.symbol
elif self.left is not None and self.right is None:
return f"[{self.symbol}{str(self.left)}]"
else:
return f"[{str(self.left)}{self.symbol}{str(self.right)}]"
def height(self):
hl = self.left.height() if self.left is not None else 0
hr = self.right.height() if self.right is not None else 0
return max(hl, hr) + 1
def to_list(self, notation="infix", with_precedence=False, precedence=None):
if with_precedence and precedence is None:
raise Exception("Should add a dictionary with precendence or list without precedence")
left = [] if self.left is None else self.left.to_list(notation, with_precedence, precedence)
right = [] if self.right is None else self.right.to_list(notation, with_precedence, precedence)
if notation == "prefix":
return [self.symbol] + left + right
elif notation == "postfix":
return left + right + [self.symbol]
elif not with_precedence:
if len(left) > 0 and len(right) == 0:
return [self.symbol] + ["("] + left + [")"]
return left + [self.symbol] + right
else:
if len(left) > 0 and len(right) == 0:
return [self.symbol] + ["("] + left + [")"]
if self.left is not None and precedence[self.symbol] > precedence[self.left.symbol]:
left = ["("] + left + [")"]
if self.right is not None and precedence[self.symbol] > precedence[self.right.symbol]:
right = ["("] + right + [")"]
return left + [self.symbol] + right
def to_dict(self):
d = {'s': self.symbol}
if self.left is not None:
d['l'] = self.left.to_dict()
if self.right is not None:
d['r'] = self.right.to_dict()
return d
def to_vector(self, symbol_dict, n_symbols):
mat = []
vec = torch.zeros(1, n_symbols)
vec[0, symbol_dict[self.symbol]] = 1.0
mat.append(vec)
if self.left is not None:
mat.append(self.left.to_vector(symbol_dict, n_symbols))
if self.right is not None:
mat.append(self.right.to_vector(symbol_dict, n_symbols))
return torch.cat(mat)
@staticmethod
def from_dict(d):
left = None
right = None
if "l" in d:
left = Node.from_dict(d["l"])
if 'r' in d:
right = Node.from_dict(d["r"])
return Node(d["s"], right=right, left=left)
@staticmethod
def to_matrix(tree, matrix_type="prediction"):
reps = []
if tree.left is not None:
reps.append(Node.to_matrix(tree.left, matrix_type))
if matrix_type == "target":
reps.append(torch.Tensor([torch.argmax(tree.target[0, 0, :])]).long())
else:
reps.append(tree.prediction[0, :, :])
if tree.right is not None:
reps.append(Node.to_matrix(tree.right, matrix_type))
return torch.cat(reps)
def create_target_vector(self, symbol_dict, n_symbols):
target = torch.zeros(n_symbols).float()
target[symbol_dict[self.symbol]] = 1.0
self.target = Variable(target[None, None, :])
if self.left is not None:
self.left.create_target_vector(symbol_dict, n_symbols)
if self.right is not None:
self.right.create_target_vector(symbol_dict, n_symbols)
def loss(self, mu, logvar, lmbda, criterion):
pred = Node.to_matrix(self, "prediction")
target = Node.to_matrix(self, "target")
BCE = criterion(pred, target)
KLD = (lmbda * -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()))
return BCE + KLD, BCE, KLD
def trim_to_height(self, max_height, types, const_symbol="c"):
if max_height == 1 and types[self.symbol] is not SymType.Const and types[self.symbol] is not SymType.Var:
self.symbol = const_symbol
self.left = None
self.right = None
if self.left is not None and max_height > 1:
self.left.trim_to_height(max_height-1, types, const_symbol)
if self.right is not None and max_height > 1:
self.right.trim_to_height(max_height-1, types, const_symbol)
def change_redundant_variables(self, variables, constant):
has_child = False
if self.right is not None:
self.right.change_redundant_variables(variables, constant)
has_child = True
if self.left is not None:
self.left.change_redundant_variables(variables, constant)
has_child = True
if not has_child and self.symbol not in variables:
self.symbol = constant
def clear_prediction(self):
if self.left is not None:
self.left.clear_prediction()
if self.right is not None:
self.right.clear_prediction()
self.prediction = None
def tokens_to_tree(tokens, symbols):
"""
tokens : list of string tokens
symbols: dictionary of possible tokens -> attributes, each token must have attributes: nargs (0-2), order
"""
tokens = ["("] + tokens + [")"]
operator_stack = []
out_stack = []
for token in tokens:
if token == "(":
operator_stack.append(token)
elif token in symbols and (symbols[token]["type"] is SymType.Var or symbols[token]["type"] is SymType.Const):
out_stack.append(Node(token))
elif token in symbols and symbols[token]["type"] is SymType.Fun:
operator_stack.append(token)
elif token in symbols and symbols[token]["type"] is SymType.Operator:
while len(operator_stack) > 0 and operator_stack[-1] != '(' \
and symbols[operator_stack[-1]]["precedence"] > symbols[token]["precedence"]:
# or (symbols[operator_stack[-1]]["precedence"] == symbols[token]["precedence"] and symbols[token]["left_asoc"])):
if symbols[operator_stack[-1]]["type"] is SymType.Fun:
out_stack.append(Node(operator_stack.pop(), left=out_stack.pop()))
else:
out_stack.append(Node(operator_stack.pop(), out_stack.pop(), out_stack.pop()))
operator_stack.append(token)
else:
while len(operator_stack) > 0 and operator_stack[-1] != '(':
if symbols[operator_stack[-1]]["type"] is SymType.Fun:
out_stack.append(Node(operator_stack.pop(), left=out_stack.pop()))
else:
out_stack.append(Node(operator_stack.pop(), out_stack.pop(), out_stack.pop()))
operator_stack.pop()
if len(operator_stack) > 0 and operator_stack[-1] in symbols and symbols[operator_stack[-1]]["type"] is SymType.Fun:
out_stack.append(Node(operator_stack.pop(), left=out_stack.pop()))
return out_stack[-1]
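# A small round trip through tokens_to_tree (hypothetical input; the
# symbol table and precedences come from universal_symbols above):
def _demo_tokens_to_tree():
    sym = {t["symbol"]: t for t in universal_symbols}
    tree = tokens_to_tree(["x1", "+", "x2", "*", "x3"], sym)
    print(str(tree))   # -> [x1+[x2*x3]] ('*' binds tighter than '+')
    prec = {t["symbol"]: t["precedence"] for t in universal_symbols}
    print(tree.to_list(with_precedence=True, precedence=prec))
    # -> ['x1', '+', 'x2', '*', 'x3'] (parentheses only where required)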
class TreeDataset(Dataset):
def __init__(self, symbols, train, test):
self.symbols = {t["symbol"]: i for i, t in enumerate(symbols)}
self.n_symbols = len(symbols)
self.train = self.transform_trees(train)
self.test = self.transform_trees(test)
def __getitem__(self, idx):
return self.train[idx]
def __len__(self):
return len(self.train)
def transform_trees(self, tree_objects):
trees = []
for i, t in enumerate(tree_objects):
t.create_target_vector(self.symbols, self.n_symbols)
trees.append(t)
return trees
class TreeSampler(Sampler):
def __init__(self, batch_size, num_eq):
self.batch_size = batch_size
self.num_eq = num_eq
def __iter__(self):
for i in range(len(self)):
batch = np.random.randint(low=0, high=self.num_eq, size=self.batch_size)
yield batch
def __len__(self):
return self.num_eq // self.batch_size
class HVAE(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(HVAE, self).__init__()
self.encoder = Encoder(input_size, hidden_size, output_size)
self.decoder = Decoder(output_size, hidden_size, input_size)
def forward(self, tree):
mu, logvar = self.encoder(tree)
z = self.sample(mu, logvar)
out = self.decoder(z, tree)
return mu, logvar, out
def sample(self, mu, logvar):
eps = Variable(torch.randn(mu.size()))
std = torch.exp(logvar / 2.0)
return mu + eps * std
def encode(self, tree):
mu, logvar = self.encoder(tree)
return mu, logvar
def decode(self, z, symbol_dict):
return self.decoder.decode(z, symbol_dict)
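# HVAE.sample above is the standard VAE reparameterization trick; a
# standalone illustration with arbitrary shapes (hedged sketch):
def _demo_reparameterization():
    mu = torch.zeros(3, 4)
    logvar = torch.full((3, 4), -2.0)
    eps = torch.randn(mu.size())
    z = mu + eps * torch.exp(logvar / 2.0)   # z ~ N(mu, exp(logvar))
    return z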
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.gru = GRU221(input_size=input_size, hidden_size=hidden_size)
self.mu = nn.Linear(in_features=hidden_size, out_features=output_size)
self.logvar = nn.Linear(in_features=hidden_size, out_features=output_size)
torch.nn.init.xavier_uniform_(self.mu.weight)
torch.nn.init.xavier_uniform_(self.logvar.weight)
def forward(self, tree):
tree_encoding = self.recursive_forward(tree)
mu = self.mu(tree_encoding)
logvar = self.logvar(tree_encoding)
return mu, logvar
def recursive_forward(self, tree):
left = self.recursive_forward(tree.left) if tree.left is not None else torch.zeros(tree.target.size(0), 1, self.hidden_size)
right = self.recursive_forward(tree.right) if tree.right is not None else torch.zeros(tree.target.size(0), 1, self.hidden_size)
hidden = self.gru(tree.target, left, right)
return hidden
class Decoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Decoder, self).__init__()
self.hidden_size = hidden_size
self.z2h = nn.Linear(input_size, hidden_size)
self.h2o = nn.Linear(hidden_size, output_size)
self.gru = GRU122(input_size=output_size, hidden_size=hidden_size)
torch.nn.init.xavier_uniform_(self.z2h.weight)
torch.nn.init.xavier_uniform_(self.h2o.weight)
def forward(self, z, tree):
hidden = self.z2h(z)
self.recursive_forward(hidden, tree)
return tree
def recursive_forward(self, hidden, tree):
prediction = self.h2o(hidden)
a = F.softmax(prediction, dim=2)
tree.prediction = prediction
if tree.left is not None or tree.right is not None:
left, right = self.gru(a, hidden)
if tree.left is not None:
self.recursive_forward(left, tree.left)
if tree.right is not None:
self.recursive_forward(right, tree.right)
def decode(self, z, symbol_dict):
hidden = self.z2h(z)
tree = self.recursive_decode(hidden, symbol_dict)
return tree
def recursive_decode(self, hidden, symbol_dict):
prediction = self.h2o(hidden)
sampled, symbol, stype = self.sample_symbol(prediction, symbol_dict)
if stype.value is SymType.Fun.value:
left, right = self.gru(F.softmax(sampled, dim=2), hidden)
l_tree = self.recursive_decode(left, symbol_dict)
r_tree = None
elif stype.value is SymType.Operator.value:
left, right = self.gru(F.softmax(sampled, dim=2), hidden)
l_tree = self.recursive_decode(left, symbol_dict)
r_tree = self.recursive_decode(right, symbol_dict)
else:
l_tree = None
r_tree = None
return Node(symbol, right=r_tree, left=l_tree)
def sample_symbol(self, prediction, symbol_dict):
sampled = F.softmax(prediction, dim=2)
sampled_symbol = torch.argmax(sampled).item()
sd = symbol_dict[sampled_symbol]
symbol = sd["symbol"]
stype = sd["type"]
# is_leaf = sd["type"] is SymType.Var or sd["type"] is SymType.Const
# is_unary = sd["type"] is SymType.Fun
return sampled, symbol, stype
class GRU221(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU221, self).__init__()
self.wir = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whr = nn.Linear(in_features=2*hidden_size, out_features=hidden_size)
self.wiz = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whz = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)
self.win = nn.Linear(in_features=input_size, out_features=hidden_size)
self.whn = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)
torch.nn.init.xavier_uniform_(self.wir.weight)
torch.nn.init.xavier_uniform_(self.whr.weight)
torch.nn.init.xavier_uniform_(self.wiz.weight)
torch.nn.init.xavier_uniform_(self.whz.weight)
torch.nn.init.xavier_uniform_(self.win.weight)
torch.nn.init.xavier_uniform_(self.whn.weight)
def forward(self, x, h1, h2):
h = torch.cat([h1, h2], dim=2)
r = torch.sigmoid(self.wir(x) + self.whr(h))
z = torch.sigmoid(self.wiz(x) + self.whz(h))
n = torch.tanh(self.win(x) + r * self.whn(h))
return (1 - z) * n + (z / 2) * h1 + (z / 2) * h2
class GRU122(nn.Module):
def __init__(self, input_size, hidden_size):
super(GRU122, self).__init__()
self.hidden_size = hidden_size
self.wir = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whr = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
self.wiz = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whz = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
self.win = nn.Linear(in_features=input_size, out_features=2*hidden_size)
self.whn = nn.Linear(in_features=hidden_size, out_features=2*hidden_size)
torch.nn.init.xavier_uniform_(self.wir.weight)
torch.nn.init.xavier_uniform_(self.whr.weight)
torch.nn.init.xavier_uniform_(self.wiz.weight)
torch.nn.init.xavier_uniform_(self.whz.weight)
torch.nn.init.xavier_uniform_(self.win.weight)
torch.nn.init.xavier_uniform_(self.whn.weight)
def forward(self, x, h):
r = torch.sigmoid(self.wir(x) + self.whr(h))
z = torch.sigmoid(self.wiz(x) + self.whz(h))
n = torch.tanh(self.win(x) + r * self.whn(h))
dh = h.repeat(1, 1, 2)
out = (1 - z) * n + z * dh
return torch.split(out, self.hidden_size, dim=2)
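# Shape sanity check for the two tree-GRU cells above (hedged sketch,
# arbitrary sizes): GRU221 merges two child hidden states into one
# parent state; GRU122 splits one parent state into two child states.
def _demo_tree_gru_shapes():
    gru_up = GRU221(input_size=21, hidden_size=8)
    gru_down = GRU122(input_size=21, hidden_size=8)
    x = torch.randn(1, 1, 21)
    h1 = torch.zeros(1, 1, 8)
    h2 = torch.zeros(1, 1, 8)
    h = gru_up(x, h1, h2)          # -> torch.Size([1, 1, 8])
    left, right = gru_down(x, h)   # -> two tensors of shape (1, 1, 8)
    return h.shape, left.shape, right.shape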
if __name__ == '__main__':
num_points = 1000
    x = np.random.random(10000)
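    # Hypothetical continuation (illustrative only; assumes the token
    # lists below are valid against universal_symbols): fit a tiny HVAE
    # on hand-written expressions and sample one expression from it.
    equations = [["x1", "+", "x2"], ["x1", "*", "x2", "+", "C"]] * 200
    generator = GeneratorHVAE.train_and_init(
        equations, variables=["x1", "x2"], symbols=universal_symbols,
        epochs=1, batch_size=32)
    print(generator.generate_one())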
from __future__ import division, print_function
"""
This script goes through a set of synthetic fit results and, using the
final chains of the sampling along with the true data initialisation
parameters, calculates the residuals (how much I'm off by) and the
normalised residuals (if how much I think I'm off by is consistent with
how much I'm off by)
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
import pdb
import sys
import os
sys.path.insert(0, '..')
# master_pdir = "../plots/residuals_better/"
#master_pdir = "../plots/tb_synth_residuals/"
master_pdir = "../plots/residuals_better_2/"
# os.mkdir(master_pdir)
def calc_best_fit(flat_samples):
"""
Given a set of aligned (converted?) samples, calculate the median and
errors of each parameter
"""
    return np.array(list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                             zip(*np.percentile(flat_samples, [16, 50, 84],
                                                axis=0)))))
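# A quick illustration of calc_best_fit with synthetic samples: for a
# Gaussian, each row approaches [median, +1 sigma, -1 sigma] (the
# numbers here are hypothetical).
def _demo_calc_best_fit():
    samples = np.random.normal(loc=5.0, scale=2.0, size=(10000, 3))
    best = calc_best_fit(samples)
    print(best)   # each of the 3 rows ~ [5.0, 2.0, 2.0]
    return best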
def getInfo(result, ix=-1):
"""
"Result" is a numpy saved file from previous form of traceforward code
"""
chain = result[1]
flat_chain = chain.reshape(-1,9)
flat_chain[:,6:8] = np.exp(flat_chain[:,6:8])
best_fit = calc_best_fit(flat_chain)
mean = best_fit[0]
sigma = np.mean(best_fit[:,1:], axis=1)
group_pars = result[4]
return np.array([mean[ix], sigma[ix], group_pars[ix]])
# ------------------------------------------------------------
# ---------- Starting simple, lets plot age offsets -------
# ------------------------------------------------------------
# I will use the result from the giant exhaustive run as a starting
# point, and alter it to accommodate new synth fits
"""
ages = [5, 10, 20]
spreads = [2, 5, 10]
v_disps = [1, 2, 5]
sizes = [25, 50, 100]
precs = ['perf', 'half', 'gaia', 'double']
ages = [5, 10]
spreads = [2]
v_disps = [1]
sizes = [25]
precs = ['perf']
"""
ages = [5, 15, 30, 50]
o_spreads = [1, 5]
o_v_disps = [2, 10]
o_sizes = [25, 100]
e_spreads = [2, 10]
e_v_disps = [1, 5]
e_sizes = [50, 200]
precs = ['perf', 'half', 'gaia', 'double']
# load all the relevant data into a massive array where
# ix implicitly correspond to the value of the parameter
rdir = "../results/synth_fit/all_with_lcc_vel/"
#rdir = "../results/tb_synth_fit/"
chain_file = "final_chain.npy"
origin_file = "origins.npy"
ofits_file = rdir + 'odd_fits.npy'
ofits_w_errs_file = rdir + 'odd_fits_w_errs.npy'
efits_file = rdir + 'even_fits.npy'
efits_w_errs_file = rdir + 'even_fits_w_errs.npy'
prec_val = {'perf':0., 'half':0.5, 'gaia':1.0, 'double':2.0}
try:
o_fits = np.load(ofits_file)
o_fits_w_errs = np.load(ofits_w_errs_file)
e_fits = np.load(efits_file)
e_fits_w_errs = np.load(efits_w_errs_file)
except IOError:
#------------------------------------------------------------
#------- GATHER FITS FROM THE 'ODD' SIMULATIONS -----------
#------------------------------------------------------------
logging.info("Gathering 'ODD' simulations")
o_fits = np.zeros((len(ages), len(o_spreads), len(o_v_disps), len(o_sizes),
len(precs), 9, 3))
o_fits_w_errs = np.zeros((len(ages), len(o_spreads), len(o_v_disps), len(o_sizes),
len(precs), 9, 3))
for age_ix, age in enumerate(ages):
for spread_ix, spread in enumerate(o_spreads):
for v_disp_ix, v_disp in enumerate(o_v_disps):
for size_ix, size in enumerate(o_sizes):
for prec_ix, prec in enumerate(precs):
pdir = rdir + "{}_{}_{}_{}/{}/".format(
age, spread, v_disp, size, prec
)
try:
flat_chain = np.load(pdir + chain_file).\
reshape(-1,9)
conv_chain = np.copy(flat_chain)
conv_chain[:,6:8] = np.exp(conv_chain[:,6:8])
                            origin = np.load(pdir + origin_file,
                                             allow_pickle=True).item()
fit_w_errs = calc_best_fit(conv_chain)
o_fits_w_errs[age_ix,spread_ix,v_disp_ix,size_ix,
prec_ix] = fit_w_errs
means = fit_w_errs[:,0]
sigs = fit_w_errs[:,1:].mean(axis=1)
key_info = np.vstack((means,sigs,
origin.pars[:-1])).T
except IOError:
print("Missing files for: {} {} {} {} {}".format(
age, spread, v_disp, size, prec
))
                            key_info = np.full((9, 3), np.nan)
o_fits[age_ix,spread_ix,v_disp_ix,size_ix,prec_ix]=\
key_info
#------------------------------------------------------------
#------- GATHER FITS FROM THE 'EVEN' SIMULATIONS ----------
#------------------------------------------------------------
logging.info("Gathering 'EVEN' simulations")
e_fits = np.zeros((len(ages), len(e_spreads), len(e_v_disps), len(e_sizes),
len(precs), 9, 3))
e_fits_w_errs = np.zeros((len(ages), len(o_spreads), len(o_v_disps), len(o_sizes),
len(precs), 9, 3))
for age_ix, age in enumerate(ages):
for spread_ix, spread in enumerate(e_spreads):
for v_disp_ix, v_disp in enumerate(e_v_disps):
for size_ix, size in enumerate(e_sizes):
for prec_ix, prec in enumerate(precs):
pdir = rdir + "{}_{}_{}_{}/{}/".format(
age, spread, v_disp, size, prec
)
try:
flat_chain = np.load(pdir + chain_file). \
reshape(-1,9)
conv_chain = np.copy(flat_chain)
conv_chain[:,6:8] = np.exp(conv_chain[:,6:8])
                            origin = np.load(pdir + origin_file,
                                             allow_pickle=True).item()
fit_w_errs = calc_best_fit(conv_chain)
e_fits_w_errs[age_ix,spread_ix,v_disp_ix,size_ix,
prec_ix] = fit_w_errs
means = fit_w_errs[:,0]
sigs = fit_w_errs[:,1:].mean(axis=1)
key_info = np.vstack((means,sigs,
origin.pars[:-1])).T
except IOError:
print("Missing files for: {} {} {} {} {}".format(
age, spread, v_disp, size, prec
))
                            key_info = np.full((9, 3), np.nan)
e_fits[age_ix,spread_ix,v_disp_ix,size_ix,prec_ix]= \
key_info
np.save(ofits_file, o_fits)
np.save(ofits_w_errs_file, o_fits_w_errs)
np.save(efits_file, e_fits)
np.save(efits_w_errs_file, e_fits_w_errs)
o_res = (o_fits[:,:,:,:,:,:,0]-o_fits[:,:,:,:,:,:,2])
o_norm_res = o_res / o_fits[:,:,:,:,:,:,1]
# index sets by worst-parameter deviation: 'worst'/'bad' exceed
# 15/5 sigma, while 'fine'/'great' stay within 15/5 sigma
o_worst_ixs = np.where(abs(o_norm_res).max(axis=-1) > 15)
o_bad_ixs = np.where(abs(o_norm_res).max(axis=-1) > 5)
o_fine_ixs = np.where(abs(o_norm_res).max(axis=-1) < 15)
o_great_ixs = np.where(abs(o_norm_res).max(axis=-1) < 5)
e_res = (e_fits[:,:,:,:,:,:,0]-e_fits[:,:,:,:,:,:,2])
e_norm_res = e_res / e_fits[:,:,:,:,:,:,1]
# the same index sets for the 'even' simulations
e_worst_ixs = np.where(abs(e_norm_res).max(axis=-1) > 15)
e_bad_ixs = np.where(abs(e_norm_res).max(axis=-1) > 5)
e_fine_ixs = np.where(abs(e_norm_res).max(axis=-1) < 15)
e_great_ixs = np.where(abs(e_norm_res).max(axis=-1) < 5)
#
# # Plot a multiplane plot showing histograms of ages
# # !!!!! CHANGE THIS SO IT's WORKING!!!!
# f, axes = plt.subplots(
# 1,
# 4,
# sharey=True,
# gridspec_kw = {'wspace':0, 'hspace':0}
# )
# axes[0].set_ylabel('Age offset [Myr]')
# range=[-.4,.6]
# bins = [5, 5, 5, 5]
# for i in range(len(ages)):
# axes[i].tick_params(direction='in', top=True, right=True,)
# axes[i].hist(
# np.hstack((
# # !!!!! CHANGE THIS SO IT's WORKING!!!!
# o_fits[i,0,0,:,1:,-1].flatten(),
# e_fits[i,0,0,:,1:,-1].flatten()
# )),
# bins=bins[i],
# orientation='horizontal'
# )
# axes[i].set_title('{} Myr'.format(ages[i]))
# # f.set_xlabel('True age [Myr]')
# f.set_tight_layout(tight=True)
# f.savefig('temp_plots/multipane_ages_hist.pdf')
# Plot a multiplane plot showing histograms of ages
f, axes = plt.subplots(
1,
4,
sharey=True,
gridspec_kw = {'wspace':0, 'hspace':0}
)
span = [-0.475, .575]
bins = [10, 10, 10, 10]
axes[0].set_ylabel('Age offset [Myr]')
for i in range(len(ages)):
axes[i].tick_params(direction='in', top=True, right=True,)
axes[i].hist(
np.hstack((
o_res[i,0,0,:,1:,-1].flatten(),
e_res[i,0,0,:,1:,-1].flatten()
)),
bins=bins[i],
orientation='horizontal',
range=span,
)
axes[i].set_title('{} Myr'.format(ages[i]))
# f.set_xlabel('True age [Myr]')
f.set_tight_layout(tight=True)
f.savefig('temp_plots/multipane_res_hist.pdf')
# plot a multiplane plot showing histogram of normalised resids
f, axes = plt.subplots(
1,
4,
sharey=True,
gridspec_kw = {'wspace':0, 'hspace':0}
)
# bins = [7, 8, 12, 25]
axes[0].set_ylabel('Normalised age residuals')
for i in range(len(ages)):
bin_range = [-3.25,3.25]
nbins = 13
axes[i].tick_params(direction='in', top=True, right=True)
axes[i].hist(
np.hstack((
o_norm_res[i,0,0,:,1:,-1].flatten(),
e_norm_res[i,0,0,:,1:,-1].flatten()
)),
bins=nbins,
range=bin_range,
orientation='horizontal'
)
# axes[i].set_xlim(axes[i].get_xlim()[0], axes[i].get_xlim()[1]+5)
axes[i].set_title('{} Myr'.format(ages[i]))
# f.set_xlabel('True age [Myr]')
f.set_tight_layout(tight=True)
f.savefig('temp_plots/multipane_norm_res_hist.pdf')
# ------------------------------------------------------------
# ---- PLOTTING ALL PLOTS IN ORIGINAL STYLE --------------
# ------------------------------------------------------------
if False:
# ------------------------------------------------------------
# ---- PLOTTING *ALL* NORMED RESIDUALS HIST --------------
# ------------------------------------------------------------
plt.clf()
all_norm_resids = np.hstack((o_norm_res[:,:,:,:,:,-1].flatten(),
e_norm_res[:,:,:,:,:,-1].flatten()))
plt.hist(all_norm_resids)
plt.xlabel("Normalised offset in age")
plt.ylabel("Number of simulations")
plt.savefig(master_pdir + "all-norm-age-res-hist.pdf")
#plotting histogram of all first
plt.clf()
age_resids = np.hstack((o_res[:, 0, 0, :, :, -1].flatten(),
e_res[:, 0, 0, :, :, -1].flatten()))
plt.hist(age_resids)
plt.xlabel("Raw offset in age")
plt.ylabel("Number of simulations")
plt.savefig(master_pdir+"all-raw-age-low-dv-low-dx-hist.pdf")
# ------------------------------------------------------------
# ---- PLOTTING RAW RESIDUALS WITH LOW DV HIST -------------
# ------------------------------------------------------------
plt.clf()
all_low_dv_raw_resids = np.hstack((o_res[:,:,0,:,:,-1].flatten(),
e_res[:,:,0,:,:,-1].flatten()))
plt.hist(all_low_dv_raw_resids)
plt.xlabel("Raw age offset [Myr]")
plt.ylabel("Number of simulations")
plt.savefig(master_pdir + "all-raw-age-res-low-dv-hist.pdf")
# ------------------------------------------------------------
# ---- PLOTTING *ALL* NORMED RESIDUALS V PARS --------------
# ------------------------------------------------------------
plt.clf()
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(7,7), sharey=True)
# PLOTTING ALL RESIDUALS AS FUNCTION OF TRUE AGE
ax = axs[0,0]
age_fit_means = []
age_fit_stds = []
for i, age in enumerate(ages):
age_norm_resids = np.hstack((o_norm_res[i,:,:,:,:,-1].flatten(),
e_norm_res[i,:,:,:,:,-1].flatten()))
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
ax.errorbar(ages, age_fit_means, yerr=age_fit_stds, fmt='b.')
ax.plot(ages, np.zeros(len(ages)), color='b', ls=':')
ax.set_xlabel("True age [Myr]")
ax.set_ylabel("Normalised offset in age")
# PLOTTING ALL RESIDUALS AS FUNCTION OF STAR COUNT
ax = axs[0,1]
age_fit_means = []
age_fit_stds = []
sizes = []
for i, size in enumerate(zip(o_sizes, e_sizes)):
age_norm_resids = o_norm_res[:,:,:,:,i,-1]
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
sizes.append(size[0])
age_norm_resids = e_norm_res[:,:,:,:,i,-1]
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
sizes.append(size[1])
ax.errorbar(sizes, age_fit_means, yerr=age_fit_stds, fmt='b.')
ax.plot(sizes, np.zeros(len(sizes)), color='b', ls=':')
ax.set_xlabel("Star count")
# PLOTTING ALL RESIDUALS AS FUNCTION OF DX
ax = axs[1,0]
age_fit_means = []
age_fit_stds = []
spreads = []
for i, spread in enumerate(zip(o_spreads, e_spreads)):
age_norm_resids = o_norm_res[:,i,:,:,:,-1]
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
spreads.append(spread[0])
age_norm_resids = e_norm_res[:,i,:,:,:,-1]
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
spreads.append(spread[1])
ax.errorbar(spreads, age_fit_means, yerr=age_fit_stds, fmt='b.')
ax.plot(spreads, np.zeros(len(spreads)), color='b', ls=':')
ax.set_xlabel("Spread [pc]")
ax.set_ylabel("Normalised offset in age")
# PLOTTING ALL RESIDUALS AS FUNCTION OF DV
ax = axs[1,1]
age_fit_means = []
age_fit_stds = []
v_disps = []
for i, v_disp in enumerate(zip(o_v_disps, e_v_disps)):
# Flipped the order cause e_v_disps has the lower v_disps
age_norm_resids = e_norm_res[:,:,i,:,:,-1]
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
v_disps.append(e_v_disps[i])
age_norm_resids = o_norm_res[:,:,i,:,:,-1]
age_fit_means.append(np.mean(age_norm_resids))
age_fit_stds.append(np.std(age_norm_resids))
v_disps.append(o_v_disps[i])
ax.errorbar(v_disps, age_fit_means, yerr=age_fit_stds, fmt='b.')
ax.plot(v_disps, np.zeros(len(v_disps)), color='b', ls=':')
ax.set_xlabel("Velocity dispersion [km/s]")
# PLOTTING ALL RESIDUALS AS FUNCTION OF PRECISION
ax = axs[2,0]
age_fit_means = []
age_fit_stds = []
vals = []
for i, prec in enumerate(precs):
vals.append(prec_val[prec])
age_norm_resids = np.hstack((o_norm_res[:,:,:,:,i,-1].flatten(),
e_norm_res[:,:,:,:,i,-1].flatten()))
        age_fit_means.append(np.mean(age_norm_resids))
        age_fit_stds.append(np.std(age_norm_resids))
    ax.errorbar(vals, age_fit_means, yerr=age_fit_stds, fmt='b.')
    ax.plot(vals, np.zeros(len(vals)), color='b', ls=':')
    ax.set_xlabel("Precision [multiple of Gaia error]")
from collections import OrderedDict, defaultdict
import pandas as pd
import cv2
import torch
import os
import yaml
import numpy as np
import json
from tqdm import tqdm
from shutil import copy
from ast import literal_eval
import SimpleITK as sitk
import pydicom
import nibabel as nib
import matplotlib.pyplot as plt
from matplotlib import cm
import albumentations
from torchvision.utils import make_grid
from pathlib import Path
from adpkd_segmentation.config.config_utils import get_object_instance # noqa
from adpkd_segmentation.datasets import dataloader as _dataloader # noqa
from adpkd_segmentation.datasets import datasets as _datasets # noqa
from adpkd_segmentation.data.link_data import makelinks # noqa
from adpkd_segmentation.data.data_utils import display_sample # noqa
from adpkd_segmentation.utils.train_utils import load_model_data # noqa
from adpkd_segmentation.utils.stats_utils import ( # noqa
bland_altman_plot,
scatter_plot,
linreg_plot,
)
from adpkd_segmentation.utils.losses import (
SigmoidBinarize,
Dice,
binarize_thresholds,
)
IOP = "IOP"
IPP = "IPP"
IPP_dist = "IPP_dist"
def load_config(config_path, run_makelinks=False, inference_path=None):
"""Reads config file and calculates additional dcm attributes such as
slice volume. Returns a dictionary used for patient wide calculations
such as TKV.
Args:
config_path (str): config file path
run_makelinks (bool, optional): Creates symbolic links during the first run. Defaults to False.
        inference_path (str, optional): overrides the dataset inference path.
    Returns:
        dict: dataloader, model, device, binarize_func, save_dir (str),
        model_name (str)
"""
if run_makelinks:
makelinks()
with open(config_path, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
model_config = config["_MODEL_CONFIG"]
loader_to_eval = config["_LOADER_TO_EVAL"]
dataloader_config = config[loader_to_eval]
# replace inference_path in config if one is provided
if inference_path is not None:
dataloader_config["dataset"]["inference_path"] = inference_path
saved_checkpoint = config["_MODEL_CHECKPOINT"]
checkpoint_format = config["_NEW_CKP_FORMAT"]
model = get_object_instance(model_config)()
if saved_checkpoint is not None:
load_model_data(saved_checkpoint, model, new_format=checkpoint_format)
dataloader = get_object_instance(dataloader_config)()
print(f"Images in inference input= {len(dataloader.dataset)}")
# TODO: support other metrics as needed
# binarize_func = SigmoidBinarize(thresholds=[0.5])
pred_process_config = config["_LOSSES_METRICS_CONFIG"]["criterions_dict"][
"dice_metric"
]["pred_process"]
pred_process = get_object_instance(pred_process_config)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()
model_name = Path(config_path).absolute().parts[-3]
save_dir = "./saved_inference"
res = {
"dataloader": dataloader,
"model": model,
"device": device,
"binarize_func": pred_process,
"save_dir": save_dir,
"model_name": model_name,
}
return res
def plot_model_results(csv_path, name):
df = pd.read_csv(csv_path)
pred = df["TKV_Pred"].to_numpy()
gt = df["TKV_GT"].to_numpy()
bland_altman_plot(
pred, gt, percent=True, title=f"{name} BA Plot: TKV % error"
)
patient_dice = df["patient_dice"].to_numpy()
scatter_plot(patient_dice, gt, title=f"{name} Dice by TKV")
linreg_plot(pred, gt, title=f"{name} Linear Fit")
def inference_to_disk(
dataloader,
model,
device,
binarize_func,
save_dir="./saved_inference",
model_name="model",
):
"""
Generates inferences from InferenceDataloader.
Args:
dataloader (dataloader): Dataloader instance for an InferenceDataset.
model (model): Dataloader instance.
device (device): Device instance.
binarize_func (function): Binarizing function.
save_dir (str, optional): Directory to save inference. Defaults to "./saved_inference".
model_name (str, optional): Name of model. Defaults to "model".
"""
dataset = dataloader.dataset
output_idx_check = (
hasattr(dataloader.dataset, "output_idx")
and dataloader.dataset.output_idx
)
assert (
output_idx_check is True
), "output indexes are required for the dataset"
for batch_idx, output in enumerate(dataloader):
x_batch, idxs_batch = output
x_batch = x_batch.to(device)
with torch.no_grad():
# get_verbose returns (sample, dcm_path, attributes dict)
dcm_file_paths = [
Path(dataset.get_verbose(idx)[1]) for idx in idxs_batch
]
dcm_file_names = [
Path(dataset.get_verbose(idx)[1]).stem for idx in idxs_batch
]
file_attribs = [dataset.get_verbose(idx)[2] for idx in idxs_batch]
y_batch_hat = model(x_batch)
# TODO: support only sigmoid saves
y_batch_hat_binary = binarize_func(y_batch_hat)
for dcm_path, dcm_name, file_attrib, img, logit, pred in zip(
dcm_file_paths,
dcm_file_names,
file_attribs,
x_batch,
y_batch_hat,
y_batch_hat_binary,
):
out_dir = (
Path.cwd()
/ Path(save_dir)
/ model_name
/ file_attrib["patient"]
/ file_attrib["MR"]
/ dcm_name
)
out_dir.parent.mkdir(parents=True, exist_ok=True)
# print(out_dir)
np.save(str(out_dir) + "_img", img.cpu().numpy())
np.save(str(out_dir) + "_logit", logit.cpu().numpy())
np.save(str(out_dir) + "_pred", pred.cpu().numpy())
copy(dcm_path, out_dir.parent / (out_dir.name + "_DICOM.dcm"))
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
# get resize transform within compose object
Resize = albumentations.augmentations.transforms.Resize
transform_resize = next(
v
for v in dataloader.dataset.augmentation.transforms
if isinstance(v, Resize)
)
assert (
transform_resize is not None
), "transform_resize must be defined"
file_attrib["transform_resize_dim"] = (
transform_resize.height,
transform_resize.width,
)
attrib_json = json.dumps(file_attrib, cls=NpEncoder)
f = open(str(out_dir) + "_attrib.json", "w")
f.write(attrib_json)
f.close()
# %%
def inference_to_nifti(inference_dir, inverse_crop_ratio=640 / 512):
"""exports directory dicom files to formated nifti volume.
calls sorting helper function
Args:
inference_dir (str, optional): inference directory.
Returns:
pd.DataFrame: Dataframe containing sorted values.
"""
# get inference paths
preds = Path(inference_dir).glob("*_pred.npy")
dcm_paths = Path(inference_dir).glob("*.dcm")
preds = sorted(preds, key=lambda x: x.name)
dcm_paths = sorted(dcm_paths, key=lambda x: x.name)
dcms = [pydicom.read_file(p) for p in dcm_paths]
out_folder = "ITKSNAP_DCM_NIFTI"
# prepare data and sort based on IOP/IPP
IOPs = [d.ImageOrientationPatient for d in dcms]
IPPs = [d.ImagePositionPatient for d in dcms]
data = {"preds": preds, "dcm_paths": dcm_paths, IOP: IOPs, IPP: IPPs}
sorted_df = IOP_IPP_dicomsort(pd.DataFrame(data))
# use SITK to generate numpy from dicom header
reader = sitk.ImageSeriesReader()
sorted_dcms_paths = [str(p) for p in sorted_df["dcm_paths"]]
reader.SetFileNames(sorted_dcms_paths)
errors = []
try:
image_3d = reader.Execute()
except Exception as e:
errors.append(f"error:{str(e)}\n path:{dcm_paths[0]}")
out_dir = dcm_paths[0].parent / out_folder
os.makedirs(out_dir, exist_ok=True)
dcm_save_name = "dicom_vol.nii"
pred_save_name = "pred_vol.nii"
sitk.WriteImage(
image_3d,
str(out_dir / dcm_save_name),
)
# load saved saved nii volume into nibabel object
dcm_nii_vol = nib.load(out_dir / dcm_save_name)
npy_preds = [np.squeeze(np.load(Path(p))) for p in sorted_df["preds"]]
# reverse center crop -- use idx 0 to get shape
pad_width = (
(npy_preds[0].shape[0] * inverse_crop_ratio) - (npy_preds[0].shape[0])
) / 2
pad_width = round(pad_width)
npy_reverse_crops = [np.pad(pred, pad_width) for pred in npy_preds]
# resize predictions to match dicom
x_y_dim = dcm_nii_vol.get_fdata().shape[0:2] # shape is in x, y, z
resized_preds = [
cv2.resize(orig, (x_y_dim), interpolation=cv2.INTER_NEAREST)
for orig in npy_reverse_crops
]
corrected_transpose = [np.transpose(r) for r in resized_preds]
# convert 2d npy to 3d npy volume
npy_pred_vol = np.stack(corrected_transpose, axis=-1).astype(np.uint16)
# create nifti mask for predictions
dicom_header = dcm_nii_vol.header.copy()
pred_nii_vol = nib.Nifti1Image(npy_pred_vol, None, header=dicom_header)
nib.save(pred_nii_vol, out_dir / pred_save_name)
print(f"Wrote to: {Path(str(out_dir / dcm_save_name))}")
return pred_nii_vol, dcm_nii_vol
# %%
def resized_stack(numpy_list, dsize=None):
"""resizing lists of array with dimension:
slices x 1 x H x W, where H = W.
Sets output size to first array at idx 0 or dsize
Args:
numpy_list (list): list of numpy arr
dsize (int, optional): output dimension. Defaults to None.
Returns:
numpy: stacked numpy lists with same size
"""
assert numpy_list[0].shape[1] == 1, "dimension check"
assert numpy_list[0].shape[2] == numpy_list[0].shape[3], "square check"
def reshape(arr):
"""reshapes [slices x 1 x H x W] to [H x W x slices]"""
arr = np.moveaxis(arr, 0, -1) # slices to end
arr = np.squeeze(arr) # remove 1 dimension
return arr
reshaped = [reshape(arr) for arr in numpy_list]
if dsize is None:
dsize = reshaped[0].shape[0:2] # get H, W from first arr
resized = [
cv2.resize(src, dsize, interpolation=cv2.INTER_CUBIC)
for src in reshaped
]
return np.stack(resized)
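# A hedged usage sketch for resized_stack (arbitrary sizes): two
# prediction stacks with equal slice counts but different in-plane
# resolution are brought to a common size before stacking.
def _demo_resized_stack():
    a = np.zeros((4, 1, 32, 32))
    b = np.zeros((4, 1, 64, 64))
    out = resized_stack([a, b])   # resized to the first array's 32x32
    return out.shape              # -> (2, 32, 32, 4)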
def display_volumes(
study_dir,
style="prob",
plot_error=False,
skip_display=True,
save_dir=None,
output_style="png",
):
"""Displays inference over original image.
Note: skip_display should be set to true to save figs.
Args:
study_dir (path): Directory of inferences.
style (str, optional): Type of data displayed.
Defaults to "prob" for probability.
plot_error (bool, optional): Display error. Defaults to False.
skip_display (bool, optional): Display console display.
save_dir (path, optional): Directory to save figs. Defaults to None.
Returns:
dict: Dictionary of images, logits, predictions, probs
"""
print(f"loading from {study_dir}")
study_dir = Path(study_dir)
imgs = sorted(study_dir.glob("*_img.npy"))
imgs_np = [np.load(i) for i in imgs]
logits = sorted(study_dir.glob("*_logit.npy"))
logits_np = [np.load(logit) for logit in logits]
preds = sorted(study_dir.glob("*_pred.npy"))
preds_np = [np.load(p) for p in preds]
vols = {
"img": np.stack(imgs_np),
"logit": np.stack(logits_np),
"pred": | np.stack(preds_np) | numpy.stack |
"""
This file warp around the nuscenes dataset => We only focus on what we want
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# insert system path for fast debugging
import sys
sys.path.insert(0, "../")
from nuscenes.nuscenes import NuScenes
from nuscenes.nuscenes import NuScenesExplorer
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, PointCloud
from nuscenes.utils.geometry_utils import view_points, transform_matrix
from config.config_nuscenes import config_nuscenes as cfg
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
from pyquaternion import Quaternion
from PIL import Image
from functools import reduce
import math
import os
import pickle
import numpy as np
from misc.devkit.python.read_depth import depth_read
# Define the kitti dataset abstraction object
class Nuscenes_dataset(object):
# Initialize the dataset
def __init__(self, mode="mini") -> None:
# Check mode
if not mode in ["mini", "full"]:
raise ValueError("[Error] Unknow nuscene dataset mode. Consider using 'mini' or 'full'")
self.mode = mode
# Initialize nuscenes dataset API
print("[Info] Initializing Nuscenes official database...")
if self.mode == "mini":
dataroot = os.path.join(cfg.DATASET_ROOT, "v1.0-mini")
self.dataset = NuScenes(version="v1.0-mini", dataroot=dataroot)
elif self.mode == "full":
# dataroot = os.path.join(cfg.DATASET_ROOT, "v1.0-trainval")
dataroot = cfg.DATASET_ROOT
self.dataset = NuScenes(version="v1.0-trainval", dataroot=dataroot)
self.explorer = NuScenesExplorer(self.dataset)
print("[Info] Finished initializing Nuscenes official database!")
# ipdb.set_trace()
# Initialize some tables
self.samples = self.dataset.sample
self.scenes = self.dataset.scene
self.num_scenes = len(self.scenes)
        self.num_samples = len(self.samples)
self.train_val_table = self.get_train_val_table()
# Train related attributes
self.train_samples = self.train_val_table["train_samples"]
self.train_sample_tokens = self.train_val_table["train_sample_tokens"]
# Val related attributes
self.val_samples = self.train_val_table["val_samples"]
self.val_sample_tokens = self.train_val_table["val_sample_tokens"]
# Define camera keywords
self.camera_orientations = ["front", "front_right", "front_left", "back_right", "back_left", "back"]
self.orientation_to_camera = {
"front": "CAM_FRONT",
"front_right": "CAM_FRONT_RIGHT",
"front_left": "CAM_FRONT_LEFT",
"back_right": "CAM_BACK_RIGHT",
"back_left": "CAM_BACK_LEFT",
"back": "CAM_BACK"
}
# Define radar keywords
self.radar_orientations = ['front', 'front_right', 'front_left', 'back_right', 'back_left']
self.radar_keywords = ['RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT',
'RADAR_BACK_RIGHT', 'RADAR_BACK_LEFT']
self.orientation_to_radar = {
"front": 'RADAR_FRONT',
"front_right": 'RADAR_FRONT_RIGHT',
"front_left": 'RADAR_FRONT_LEFT',
"back_right": 'RADAR_BACK_RIGHT',
"back_left": 'RADAR_BACK_LEFT'
}
# Now based on dataset version, we include different orientations
# print(cfg.version)
assert cfg.version in ["ver1", "ver2", "ver3"]
self.version = cfg.version
print("===============================")
print("[Info] Use dataset %s" % (self.version))
self.train_datapoints = self.get_datapoints("train")
self.val_datapoints = self.get_datapoints("val")
print("\t train datapoints: %d" % (len(self.train_datapoints)))
print("\t val datapoints: %d" % (len(self.val_datapoints)))
print("===============================")
# Initialize how many sweeps to use
self.lidar_sweeps = cfg.lidar_sweeps
self.radar_sweeps = cfg.radar_sweeps
####################################
## Some dataset manipulation APIs ##
####################################
# Easier call for get
def get(self, table_name, token):
return self.dataset.get(table_name, token)
# Easier call for get_sample_data
def get_sample_data(self, sample_data_token):
return self.dataset.get_sample_data(sample_data_token)
# Generate the train / val table based on the random seed
def get_train_val_table(self):
seed = cfg.TRAIN_VAL_SEED
val_num = int(cfg.VAL_RATIO * self.num_scenes)
# Get train / val scenes
all = set(list(range(self.num_scenes)))
np.random.seed(seed)
val = set(np.random.choice(np.arange(0, self.num_scenes, 1), val_num, replace=False))
train = all - val
# Split number set
train_scenes = list(train)
val_scenes = list(val)
# Token list
train_scene_tokens = [self.scenes[_]["token"] for _ in train_scenes]
val_scene_tokens = [self.scenes[_]["token"] for _ in val_scenes]
# object list
train_scene_objs = [self.scenes[_] for _ in train_scenes]
val_scene_objs = [self.scenes[_] for _ in val_scenes]
# sample token list
train_sample_tokens = []
for scene_obj in train_scene_objs:
train_sample_tokens += self.get_sample_tokens_from_scene(scene_obj)
val_sample_tokens = []
for scene_obj in val_scene_objs:
val_sample_tokens += self.get_sample_tokens_from_scene(scene_obj)
# sample list
train_samples = []
for scene_obj in train_scene_objs:
train_samples += self.get_samples_from_scene(scene_obj)
val_samples = []
for scene_obj in val_scene_objs:
val_samples += self.get_samples_from_scene(scene_obj)
return {
"train_scene_tokens": train_scene_tokens,
"train_scene_objs": train_scene_objs,
"train_sample_tokens": train_sample_tokens,
"train_samples": train_samples,
"val_scene_tokens": val_scene_tokens,
"val_scene_objs": val_scene_objs,
"val_sample_tokens": val_sample_tokens,
"val_samples": val_samples
}
# Get all samples in one scene
def get_samples_from_scene(self, scene_obj):
# Check if scene is valid
if not self.check_scene_obj(scene_obj):
raise ValueError
sample_lst = []
current_sample_token = scene_obj['first_sample_token']
while True:
current_sample = self.get("sample", current_sample_token)
sample_lst.append(current_sample)
if not current_sample["next"] == "":
current_sample_token = current_sample["next"]
else:
break
return sample_lst
# Get all samples in one scene
def get_sample_tokens_from_scene(self, scene_obj):
# Check if scene is valid
if not self.check_scene_obj(scene_obj):
raise ValueError
sample_token_lst = []
current_sample_token = scene_obj['first_sample_token']
while True:
current_sample = self.get("sample", current_sample_token)
sample_token_lst.append(current_sample["token"])
if not current_sample["next"] == "":
current_sample_token = current_sample["next"]
else:
break
return sample_token_lst
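# Note: the two traversal methods above duplicate the same walk over the
# "next"-token linked list. A minimal refactor sketch (hypothetical helper,
# not part of the original class) that both could delegate to:
#
# def iter_samples_from_scene(self, scene_obj):
#     if not self.check_scene_obj(scene_obj):
#         raise ValueError
#     token = scene_obj['first_sample_token']
#     while token != "":
#         sample = self.get("sample", token)
#         yield sample
#         token = sample["next"]
#
# get_samples_from_scene is then list(self.iter_samples_from_scene(scene_obj)),
# and the token variant collects sample["token"] from the same iterator.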
########################
## Torch dataset APIs ##
########################
# Add orientations to datapoints
def get_datapoints(self, mode="train"):
datapoints = []
# Get orientations given dataset version
if self.version in ["ver1", "ver3"]:
orientations = ["front", "back"]
elif self.version == "ver2":
orientations = self.camera_orientations
if mode == "train":
samples = self.train_samples
elif mode == "val":
samples = self.val_samples
for idx, sample in enumerate(samples):
for ori in orientations:
datapoint = [idx, ori]
datapoints.append(datapoint)
return datapoints
# Get data given corresponding datapoints
def get_data(self, datapoint, mode="train"):
assert mode in ["train", "val"]
# Check if datapoint is valid
if mode == "train":
assert datapoint in self.train_datapoints
else:
assert datapoint in self.val_datapoints
sample_index = datapoint[0]
orientation = datapoint[1]
if mode == "train":
sample_obj = self.train_samples[sample_index]
else:
sample_obj = self.val_samples[sample_index]
lidar_data = self.get_lidar_depth_map_multi_bidirectional(sample_obj, orientation,
num_sweeps=self.lidar_sweeps)
radar_data = self.get_radar_depth_map_multi_bidirectional(sample_obj, orientation,
num_sweeps=self.radar_sweeps)
return {
"lidar_depth": lidar_data["depth"],
"lidar_points": lidar_data["points"],
"lidar_depth_points": lidar_data["depth_points"],
"radar_depth": radar_data["depth"],
"radar_points": radar_data["points"],
"radar_depth_points": radar_data["depth_points"],
"radar_raw_points": radar_data["raw_points"],
"image": lidar_data["image"]
}
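# Sketch of wiring these APIs into a torch.utils.data.Dataset (a hypothetical
# wrapper for illustration, not part of the original file; it only relies on
# get_dataset_length, get_data and the datapoint lists built in __init__):
#
# class DepthDataset(torch.utils.data.Dataset):
#     def __init__(self, nusc, mode="train"):
#         self.nusc, self.mode = nusc, mode
#     def __len__(self):
#         return self.nusc.get_dataset_length(self.mode)
#     def __getitem__(self, idx):
#         dps = (self.nusc.train_datapoints if self.mode == "train"
#                else self.nusc.val_datapoints)
#         return self.nusc.get_data(dps[idx], mode=self.mode)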
# Get dataset length given the mode
def get_dataset_length(self, mode="train"):
assert mode in ["train", "val"]
if mode == "train":
return len(self.train_datapoints)
else:
return len(self.val_datapoints)
#####################################
## LiDAR point cloud manipulations ##
#####################################
# Plot point cloud
def get_lidar_depth_map(self, sample_obj, orientation):
# Check the input orientation
assert self.check_camera_orientation(orientation) == True
# Get sensor tokens
lidar_token = sample_obj['data']['LIDAR_TOP']
orientation_key = self.map_camera_keyword(orientation)
camera_front_token = sample_obj['data'][orientation_key]
# Get projected point cloud and depth
points, depth_points, image = self.explorer.map_pointcloud_to_image(lidar_token, camera_front_token)
# Construct depth map
image = np.array(image)
depth = np.zeros(image.shape[:2])
# Assign depth value to corresponding pixel on the image
depth_loc = (points[:2, :].T).astype(np.int32)
depth[depth_loc[:, 1], depth_loc[:, 0]] = depth_points
return {
"image": image,
"depth": depth,
"points": points,
"depth_points": depth_points
}
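# Note: the assignment above keeps whichever point happens to be written last
# when several points project to the same pixel. A sketch of an alternative
# (not the original behavior) that keeps the nearest depth per pixel instead:
#
# depth = np.full(image.shape[:2], np.inf)
# np.minimum.at(depth, (depth_loc[:, 1], depth_loc[:, 0]), depth_points)
# depth[np.isinf(depth)] = 0.0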
# Plot lidar depth map with multiple sweeps
def get_lidar_depth_map_multi(self, sample_obj, orientation, num_sweeps=10):
# Check the input orientation
assert self.check_camera_orientation(orientation) == True
# Get sensor tokens
lidar_token = sample_obj['data']['LIDAR_TOP']
lidar_record = self.dataset.get("sample_data", lidar_token)
# Get camera token
orientation_key = self.map_camera_keyword(orientation)
camera_front_token = sample_obj['data'][orientation_key]
camera_record = self.dataset.get("sample_data", camera_front_token)
im = Image.open(os.path.join(self.dataset.dataroot, camera_record['filename']))
# Get multiple sweeps
point_clouds, times = LidarPointCloud.from_file_multisweep(self.dataset,
sample_obj, lidar_record["channel"],
"LIDAR_TOP", nsweeps=num_sweeps)
# First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
cs_record = self.dataset.get('calibrated_sensor', lidar_record['calibrated_sensor_token'])
point_clouds.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
point_clouds.translate(np.array(cs_record['translation']))
# Second step: transform to the global frame.
poserecord = self.dataset.get('ego_pose', lidar_record['ego_pose_token'])
point_clouds.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
point_clouds.translate(np.array(poserecord['translation']))
# Third step: transform into the ego vehicle frame for the timestamp of the image.
poserecord = self.dataset.get('ego_pose', camera_record['ego_pose_token'])
point_clouds.translate(-np.array(poserecord['translation']))
point_clouds.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)
# Fourth step: transform into the camera.
cs_record = self.dataset.get('calibrated_sensor', camera_record['calibrated_sensor_token'])
point_clouds.translate(-np.array(cs_record['translation']))
point_clouds.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = point_clouds.points[2, :]
# Take the actual picture (matrix multiplication with camera-matrix + renormalization).
points = view_points(point_clouds.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)
# Mask the depth that is not in the field of view
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > 0)
mask = np.logical_and(mask, points[0, :] > 1)
mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
mask = np.logical_and(mask, points[1, :] > 1)
mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
points = points[:, mask]
depths = depths[mask]
# Construct depth map
image = np.array(im)
depth = np.zeros(image.shape[:2])
import os
import random
import datetime
import argparse
import time
import numpy as np
from torchvision import models
import torch.nn as nn
import torch
import dlib
import cv2
import imutils
from imutils.video import VideoStream
from imutils import face_utils
from moviepy.editor import *
from moviepy.editor import VideoFileClip, concatenate_videoclips
##############################################
# #
# generate stagemix #
# #
##############################################
class RandomDistance:
def distance(self, reference_clip, compare_clip, args):
dur_end = min(reference_clip.duration, compare_clip.duration)
return random.randrange(1,100), min(dur_end, random.randrange(3,7)), {}
class FaceDistance:
def __init__(self, shape_predictor_path):
self.skip_frame_rate = 4 # 'the number of frames to skip'
self.minimax_frames = 5 # 'the number of frames to minimax distance'
# https://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/
self.shape_predictor = shape_predictor_path
def extract_landmark(self, reference_clip, compare_clip):
self.clips =[reference_clip, compare_clip]
# face detect
detector = dlib.get_frontal_face_detector()
# face landmark
predictor = dlib.shape_predictor(self.shape_predictor)
# extract landmarks from the reference and compare clips
clips_frame_info = []
for clip in self.clips:
i=0
every_frame_info= []
# for every frame
while True:
frame = clip.get_frame(i*1.0/clip.fps)
i+=self.skip_frame_rate # skip frames
if (i*1.0/clip.fps)> clip.duration:
break
# resizing width & convert to gray scale
frame = imutils.resize(frame, width=800)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# face rect detect
rects = detector(gray, 0)
# if there is face recognized
if len(rects)>0:
# find the largest face rect
max_width = 0
max_rect = None
for rect in rects:
    if int(rect.width()) > max_width:
        max_width = int(rect.width())
        max_rect = rect
# face landmark coordinate: (x, y)
shape = predictor(gray, max_rect)
shape = face_utils.shape_to_np(shape)
every_frame_info.append(shape)
else:
every_frame_info.append([])
clips_frame_info.append(np.array(every_frame_info))
cv2.destroyAllWindows()
return clips_frame_info
def get_all_frame_distance(self, clips_frame_info, min_size):
dist_arr = []
# Calculate distance by frame
for i in range(min_size-1):
if len(clips_frame_info[0][i])>0 and len(clips_frame_info[1][i+1])>0: # if a face was found in both frames
# distance between the eyes in the two clips (left eye to left eye, right eye to right eye)
l = 36 # left end of the left eye
r = 45 # right end of the right eye
left_eye = ((clips_frame_info[0][i][l][0] - clips_frame_info[1][i+1][l][0])**2 + (clips_frame_info[0][i][l][1] - clips_frame_info[1][i+1][l][1])**2)**0.5
right_eye = ((clips_frame_info[0][i][r][0] - clips_frame_info[1][i+1][r][0])**2 + (clips_frame_info[0][i][r][1] - clips_frame_info[1][i+1][r][1])**2)**0.5
total_diff = left_eye + right_eye
dist_arr.append(total_diff)
else:
dist_arr.append(None)
return dist_arr
def distance(self, reference_clip, compare_clip, args):
time.sleep(2.0)
clips_frame_info = self.extract_landmark(reference_clip, compare_clip) # landmarks computed for every sampled frame
min_size = min(len(clips_frame_info[0]),len(clips_frame_info[1]))
dist_arr = self.get_all_frame_distance(clips_frame_info, min_size)
clips =[reference_clip,compare_clip]
# Minimize max distance in (minimax_frames) frames
minimax_frames = self.minimax_frames
min_diff = float('inf')
min_idx = 0
max_dist = []
for i in range(min_size - (minimax_frames - 1)): # look at the frames "before and after" this frame
start_minmax_idx = 0 if (i - minimax_frames)<0 else i - minimax_frames
if (None in dist_arr[start_minmax_idx :i + minimax_frames]):
max_dist.append(None)
else:
tmp_max = np.max(dist_arr[start_minmax_idx:i + minimax_frames])
max_dist.append(tmp_max)
if min_diff > tmp_max:
min_diff = tmp_max
min_idx = i
# return distance, second, additional_info
return min_diff, (min_idx*self.skip_frame_rate)/self.clips[0].fps, {}
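# Usage sketch (illustrative, hypothetical file paths): score a cut point
# between two synchronized performance clips; the returned second is where the
# eye landmarks of the two clips line up best, so the transition looks smooth.
#
# ref = VideoFileClip("stage_a.mp4").subclip(10, 20)
# cmp_clip = VideoFileClip("stage_b.mp4").subclip(10, 20)
# fd = FaceDistance("shape_predictor_68_face_landmarks.dat")  # standard dlib model
# dist, cut_second, info = fd.distance(ref, cmp_clip, args=None)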
class PoseDistance:
def __init__(self):
self.SKIP_FRAME_RATE = 10
self.MINIMAX_FRAME = 4
# read the documentation of this function
self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
self.model.eval()
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def extract_boxes(self, reference_clip, compare_clip):
self.clips = [reference_clip, compare_clip]
clips_frame_info = []
for clip in self.clips:
i = 0
every_frame_info = []
# loop over the frames from the video stream
while True:
i+=self.SKIP_FRAME_RATE # at 60 fps per second we can afford to skip some frames
if (i*1.0/clip.fps)> clip.duration:
break
frame = clip.get_frame(i*1.0/clip.fps)
frame = imutils.resize(frame, width=640)
frame = frame/255 # image, and should be in ``0-1`` range.
frame = np.transpose(frame, (2,0,1)) # HWC -> CHW (which source axis goes into each target slot)
x = [torch.from_numpy(frame).float()]
# label list https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt
predictions = self.model(x)
prediction= predictions[0]
each_box_list = zip(prediction['boxes'].tolist(), prediction['labels'].tolist(), prediction['scores'].tolist())
# the threshold needs to be around 0.95 to drop the dark-looking audience detections
filtered_box_list = filter(lambda x: x[1]==1 and x[2] >= 0.95, each_box_list)
filtered_center_dot_list = list(map(lambda x: [(x[0][0]+x[0][2])/2, (x[0][1]+x[0][3])/2], filtered_box_list))
# sort by x coordinate (assuming the formation is spread out horizontally)
sorted_dot_list = sorted(filtered_center_dot_list, key = lambda x: x[0])
every_frame_info.append(sorted_dot_list) # per-frame info
clips_frame_info.append(np.array(every_frame_info))
import numpy as np
from .. import evaluate as ev
class Null(object):
def __init__(self, *args, **kwargs):
self.default_cache = 'feature-cache'
def __call__(self, g, n1, n2=None):
return self.compute_features(g, n1, n2)
def write_fm(self, json_fm={}):
return json_fm
def compute_features(self, g, n1, n2=None):
if n2 is None:
c1 = g.node[n1][self.default_cache]
return self.compute_node_features(g, n1, c1)
if g.node[n1]['size'] > g.node[n2]['size']:
n1, n2 = n2, n1 # smaller node first
c1, c2, ce = [d[self.default_cache] for d in
[g.node[n1], g.node[n2], g[n1][n2]]]
return np.concatenate((
self.compute_node_features(g, n1, c1),
self.compute_node_features(g, n2, c2),
self.compute_edge_features(g, n1, n2, ce),
self.compute_difference_features(g, n1, n2, c1, c2)
))
def create_node_cache(self, *args, **kwargs):
return np.array([])
def create_edge_cache(self, *args, **kwargs):
return np.array([])
def update_node_cache(self, *args, **kwargs):
pass
def update_edge_cache(self, *args, **kwargs):
pass
def compute_node_features(self, *args, **kwargs):
return np.array([])
def compute_edge_features(self, *args, **kwargs):
return np.array([])
def compute_difference_features(self, *args, **kwargs):
return np.array([])
class Composite(Null):
def __init__(self, children=[], *args, **kwargs):
super(Composite, self).__init__()
self.children = children
def write_fm(self, json_fm={}):
for child in self.children:
json_fm.update(child.write_fm(json_fm))
return json_fm
def create_node_cache(self, *args, **kwargs):
return [c.create_node_cache(*args, **kwargs) for c in self.children]
def create_edge_cache(self, *args, **kwargs):
return [c.create_edge_cache(*args, **kwargs) for c in self.children]
def update_node_cache(self, g, n1, n2, dst, src):
for i, child in enumerate(self.children):
child.update_node_cache(g, n1, n2, dst[i], src[i])
def update_edge_cache(self, g, e1, e2, dst, src):
for i, child in enumerate(self.children):
child.update_edge_cache(g, e1, e2, dst[i], src[i])
def compute_node_features(self, g, n, cache=None):
if cache is None: cache = g.node[n][self.default_cache]
features = []
for i, child in enumerate(self.children):
features.append(child.compute_node_features(g, n, cache[i]))
return np.concatenate(features)
def compute_edge_features(self, g, n1, n2, cache=None):
if cache is None: cache = g[n1][n2][self.default_cache]
features = []
for i, child in enumerate(self.children):
features.append(child.compute_edge_features(g, n1, n2, cache[i]))
return np.concatenate(features)
def compute_difference_features(self, g, n1, n2, cache1=None, cache2=None):
if cache1 is None: cache1 = g.node[n1][self.default_cache]
if cache2 is None: cache2 = g.node[n2][self.default_cache]
features = []
for i, child in enumerate(self.children):
features.append(child.compute_difference_features(
g, n1, n2, cache1[i], cache2[i]))
return np.concatenate(features)
def _compute_delta_vi(ctable, fragments0, fragments1):
c0 = np.sum(ctable[list(fragments0)], axis=0)
c1 = np.sum(ctable[list(fragments1)], axis=0)
cr = c0 + c1
p0 = np.sum(c0)
p1 = np.sum(c1)
pr = np.sum(cr)
p0g = np.sum(ev.xlogx(c0))
p1g = np.sum(ev.xlogx(c1))
prg = np.sum(ev.xlogx(cr))
return (pr * np.log2(pr) - p0 * np.log2(p0) - p1 * np.log2(p1) -
2 * (prg - p0g - p1g))
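# Tiny worked example (illustrative only, assuming ev.xlogx(x) == x * log2(x)):
# fragments that hit the same ground-truth column give a negative delta-VI
# (merging them helps), fragments on different columns a positive one.
#
# ctable = np.array([[10., 0.],
#                    [10., 0.],
#                    [0., 10.]])
# _compute_delta_vi(ctable, {0}, {1})  # -> -20.0, merge is good
# _compute_delta_vi(ctable, {0}, {2})  # -> +20.0, merge is bad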
class Mock(Null):
'''
Mock feature manager to verify agglomerative learning works.
This manager learns a different feature map for fragments vs
agglomerated segments. It relies on knowing the ground truth for a
given fragmentation.
Parameters
----------
frag, gt : array of int, same shape
The fragmentation and ground truth volumes. Must have same shape.
'''
def __init__(self, frag, gt):
super().__init__()
self.ctable = ev.contingency_table(frag, gt, ignore_seg=[],
ignore_gt=[]).toarray()
self._std = 0.1 # standard deviation of feature computations
def eps(self):
return np.random.randn(2) * self._std
def compute_features(self, g, n1, n2=None):
if n2 is None:
return np.array([])
f1, f2 = g.node[n1]['fragments'], g.node[n2]['fragments']
should_merge = _compute_delta_vi(self.ctable, f1, f2) < 0
if should_merge:
return np.array([0., 0.]) + self.eps()
else:
if len(f1) + len(f2) == 2: # single-fragment merge
return np.array([1., 0.]) + self.eps()
else: # multi-fragment merge
return np.array([0., 1.]) + self.eps()
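# Usage sketch for Mock (toy arrays, illustrative only): fragments 1 and 2
# belong to one ground-truth body, fragments 3 and 4 to another, so merges
# within {1, 2} or {3, 4} lower the VI and get the noisy "merge" feature.
#
# frag = np.array([[1, 1, 2, 2],
#                  [3, 3, 4, 4]])
# gt = np.array([[1, 1, 1, 1],
#                [2, 2, 2, 2]])
# fm = Mock(frag, gt)  # features are then queried via fm(g, n1, n2) on a RAG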
from inspect import trace
import os
import glob
import time
from datetime import datetime
import argparse
import gym
from gym.envs.registration import register
import torch
import numpy as np
import random
import shap
from PIL import Image, ImageDraw, ImageFont
import imageio
import matplotlib.pyplot as plt
from PPO import PPO
import pdb
def visualize_agent(args, env):
# action space dimension
if args.has_continuous_action_space:
action_dim = env.action_space.shape[0]
else:
action_dim = env.action_space.n
ppo_agents = []
for i in range(args.n_agents):
ppo_agents.append(PPO(args.obs_dim, args.obs_size, action_dim, args.lr_actor, args.lr_critic, args.gamma, args.K_epochs, args.eps_clip, args.has_continuous_action_space, args.action_std))
directory = "storage" + '/' + args.policy_env + '/'
for i in range(args.n_agents):
checkpoint_path = directory + f"PPO_{args.policy_env}_seed_{args.random_seed}_run_{args.run_num_pretrained}_agent_{i}.pth"
ppo_agents[i].load(checkpoint_path)
print("--------------------------------------------------------------------------------------------")
test_running_reward = 0
frame_count = 0
img_buffer = []
for ep_idx in range(1, args.n_episodes+ 1):
obs = env.reset()
ep_reward = 0
# render first frame of env
render_image(img_buffer, args, env, ep_idx, t=frame_count, done=False, save=True)
for t in range(1, args.max_ep_len+ 1):
# select action with policy
actions = []
for i in range(args.n_agents):
action = ppo_agents[i].select_action(obs[i])
actions.append(action)
obs, reward, done, _ = env.step(actions)
ep_reward += reward
render_image(img_buffer, args, env, ep_idx, frame_count, done, ep_reward, save=True)
frame_count += 1
if done:
# add some frames to make the gif "hang" at the end
for end_frames in range(args.gif_n_end_frames):
frame_count += 1
render_image(img_buffer, args, env, ep_idx, frame_count, done, ep_reward, save=True)
break
# clear agent buffer
for i in range(args.n_agents):
ppo_agents[i].buffer.clear()
test_running_reward += ep_reward
print('Episode: {} \t Reward: {}'.format(ep_idx, round(ep_reward.item(), 2)))
ep_reward = 0
env.close()
print("============================================================================================")
avg_test_reward = test_running_reward / args.n_episodes
avg_test_reward = round(avg_test_reward.item(), 2)
print("average test reward : " + str(avg_test_reward))
print("============================================================================================")
# save gif to disk
save_gif(args, img_buffer)
def feature_importance_SHAP(args, env):
# action space dimension
if args.has_continuous_action_space:
action_dim = env.action_space.shape[0]
else:
action_dim = env.action_space.n
ppo_agents = []
for i in range(args.n_agents):
ppo_agents.append(PPO(args.obs_dim, args.obs_size, action_dim, args.lr_actor, args.lr_critic, args.gamma, args.K_epochs, args.eps_clip, args.has_continuous_action_space, args.action_std))
directory = "storage" + '/' + args.policy_env + '/'
for i in range(args.n_agents):
checkpoint_path = directory + f"PPO_{args.policy_env}_seed_{args.random_seed}_run_{args.run_num_pretrained}_agent_{i}.pth"
ppo_agents[i].load(checkpoint_path)
print("--------------------------------------------------------------------------------------------")
#TODO so this isn't actually used with the "partition" explainer method, it would only be needed if we did CIU (I think, double check that actually) or the "Sampling" explainer
# create a "background" dataset of agent observations
test_running_reward = 0
frame_count = 0
img_buffer = []
obs = env.reset()
obs_buffer = []
for ep_idx in range(1, args.n_episodes_background + 1):
obs = env.reset()
# img = env.render()
# pdb.set_trace()
obs_buffer.append(np.expand_dims(obs[0], 0))
import ast
import glob
import xnet
import numpy as np
import matplotlib.pyplot as plt
from igraph import *
from textwrap import wrap
from sklearn.cluster import KMeans
from util import get_attr_pacs, get_pac_list
from sklearn.cluster import AgglomerativeClustering
from sklearn.feature_extraction.text import CountVectorizer
# data = xnet.xnet2igraph('../data/citation_network_ge1990_pacs.xnet')
def get_papers(colab_net, author_vtx):
edges = colab_net.incident(author_vtx)
papers = set()
for e in edges:
papers |= set(ast.literal_eval(colab_net.es[e]['papers']))
return papers
def to_str(pac_list):
pac_str = "none"
for pac in pac_list:
if pac == 'None':
continue
pac = pac[:2]
pac_str += " " + pac
# if "," in pac_list or ";" in pac_list:
# print("ERRO, PRECISA APLICAR SPLIT")
# print(pac_str)
return pac_str
def authors2str(colab_net, citation_net):
authors = colab_net.vs
vecs = []
i = 0
for author in authors:
i += 1
if i % 10000 == 0:
print(i)
paper_idxs = get_papers(colab_net, author)
papers = citation_net.vs.select(numeric_id_in=paper_idxs)
if not len(paper_idxs) == len(papers):
print("DEU RUIM")
pacs = papers['PACS-0'] + papers['PACS-1'] + papers['PACS-2'] + papers['PACS-3'] + papers['PACS-4']
vec = to_str(pacs)
vecs.append(vec)
return vecs
def authors2vec(vecs):
vocab = ['01', '02', '03', '04', '05', '06', '07', '11', '12', '13', '14', '21', '22', '23', '24', '25', '26', '27',
'28', '29', '31', '32', '33', '34']
vocab += ['35', '36', '37', '41', '42', '43', '44', '45', '46', '47', '51', '52', '61', '62', '63', '64', '65',
'66', '67', '68', '71', '72', '73', '74']
vocab += ['75', '76', '77', '78', '79', '81', '82', '83', '84', '85', '86', '87', '88', '89', '91', '92', '93',
'94', '95', '96', '97', '98', 'none']
vectorizer = CountVectorizer(vocabulary=vocab)
X = vectorizer.fit_transform(vecs)
# print(vectorizer.vocabulary_)
return X.toarray()
def clustering_by_pac(colab_net, citation_net, year):
vecs = authors2str(colab_net, citation_net)
bow = authors2vec(vecs)
for i, line in enumerate(bow):
colab_net.vs[i]['bow'] = str(line.tolist())
# for line in bow[:100]:
# print(line.tolist())
clustering = AgglomerativeClustering(n_clusters=5, affinity='cosine', linkage='average')
fited_clustering = clustering.fit(bow)
labels = fited_clustering.labels_
labels = [str(l) for l in labels]
colab_net.vs['pac_hier'] = labels
print('hierarchical', np.unique(labels, return_counts=True))
clustering = KMeans(n_clusters=5, random_state=0)
fited_clustering = clustering.fit(bow)
labels = fited_clustering.labels_
labels = [str(l) for l in labels]
print('kmeans', np.unique(labels, return_counts=True))
colab_net.vs['pac_kmeans'] = labels
# pca = PCA(n_components=3)
# bow_3d = pca.fit_transform(bow)
# print(bow_3d)
# fig = plt.figure()
# ax = fig.add_subplot(111,projection='3d')
# color = {'0':'red','1':'blue','2':'green','3':'black','4':'orange','5':'yellow','6':'pink','7':'purple','8':'magenta','9':'cyan'}
# colors = [color[l] for l in labels]
# ax.scatter(bow_3d[:,0],bow_3d[:,1],bow_3d[:,2],c=colors,alpha=0.5)
# plt.savefig('test_kmeans_'+str(year)+'.pdf',format='pdf')
return colab_net
def get_largest_component(g):
components = g.components()
giant = components.giant()
return giant
def identify_communities_multilevel(net, lvl):
giant = get_largest_component(net)
comm_level = giant.community_multilevel(weights='weight', return_levels=True)
t = len(comm_level) - 1
comms = comm_level[min(lvl, t)]
comm_list = comms.subgraphs() # communities in current level
print('Level', min(lvl, t), 'Number of communities identified:', len(comm_list))
net_copy = net.copy()
net_copy.vs['community'] = "-1"
for idx, c_graph in enumerate(comm_list):
for v1 in c_graph.vs:
v2 = net_copy.vs.find(name=v1['name'])
v2['community'] = str(idx + 1)
return net_copy
def pac_nets_comm(files):
for f in files:
pac_net = xnet.xnet2igraph(f)
pac_net = identify_communities_multilevel(pac_net, 0)
# xnet.igraph2xnet(pac_net, f[:-5]+'_multilevel2.xnet')
def authors_to_str():
year = 1990
for f in files:
print(f)
colab_net = xnet.xnet2igraph(f)
vtx2del = [vtx for vtx in colab_net.vs if colab_net.degree(vtx) == 0]
colab_net.delete_vertices(vtx2del)
vecs = authors2str(colab_net, data)
for vtx, vec in zip(colab_net.vs, vecs):
vtx['pac_list'] = vec
xnet.igraph2xnet(colab_net, f[:-5] + '_with_author_pac_list.xnet')
year += 1
if __name__ == '__main__':
code2name = {'01': "Communication, education, history, and philosophy",
'02': "Mathematical methods in physics",
'03': "Quantum mechanics, fiel theories, and special relativity",
'04': "General relativity and gravitation",
'05': "Statistical physics, thermodynamics, and nonlinear dynamical systems",
'11': "General theory of fields and particles",
'12': "Specific theories and interaction models; particle systematics",
'13': "Specific reactions and phenomenology",
'14': "Properties of specific particles",
'21': 'Nuclear structure',
'24': 'Nuclear reactions: general',
'25': 'Nuclear reactions: specific reactions',
'27': 'Properties of specific nuclei listed by mass ranges',
'29': 'Experimental methods and instrumentation for elementary particle and nuclear physics',
'32': 'Atomic properties and interactions with photons',
'34': 'Atomic and molecular collision processes and interactions',
'41': 'Electromagnetism; electron and ion optics',
'42': 'Optics',
'47': 'Fluid dynamics',
'52': 'Physics of plasmas and electric discharges',
'61': 'Structure of solids and liquids; crystallography',
'64': 'Equations of state, phase equilibria, and phase transitions',
'68': 'Surfaces and interfaces; thin films and nanosystems (structure and nonelectronic properties)',
'71': 'Electronic structure of bulk materials',
'73': 'Electronic structure and electrical properties of surfaces, interfaces, thin films, and low-dimensional structures',
'74': 'Superconductivity',
'75': 'Magnetic properties and materials',
'78': 'Optical properties, condensed - matter spectroscopy and other interactions of radiation and particles with condensed matter',
'81': 'Materials science',
'82': 'Physical chemistry and chemical physics',
'84': 'Electronics; radiowave and microwave technology; direct energy conversion and storage',
'87': 'Biological and medical physics',
'94': 'Physics of the ionosphere and magnetosphere',
'95': 'Fundamental astronomy and astrophysics; instrumentation, techniques, and astronomical observations',
'97': 'Stars',
'98': 'Stellar systems; interstellar medium; galactic and extragalactic objects and systems; the Universe'}
# authors_to_str()
# files = 'colabs/wbasic/*0.5*selected_wb_with_author_pac_list.xnet'
files = 'data/pacs/2lvls/*delta4_v3.xnet'
#
files = glob.glob(files)
files = sorted(files)
#
# pac_nets_comm(files)
# PACS distribuição para 2010
file = 'data/pacs/2lvls/pac_net_2006_2lvls_delta4_v3_multilevel2.xnet'
pacs_net_2006 = xnet.xnet2igraph(file)
data = xnet.xnet2igraph('data/citation_network_ge1991_pacs.xnet')
papers_2006 = data.vs.select(year_ge=2006, year_le=2010)
attr_pacs = get_attr_pacs()
pac_list = get_pac_list()
comms = dict()
for pac in pacs_net_2006.vs:
comms[pac['name']] = pac['community']
pac_disc = defaultdict(lambda: [])
paper_count = defaultdict(lambda: set())
author_count = defaultdict(lambda: set())
for paper in papers_2006:
for attr in attr_pacs:
pac = paper[attr][:2]
if pac not in pac_list:
continue
c = comms[pac]
paper_count[c].add(paper.index)
pac_disc[c].append(pac)
for author in paper['authors_idxs'].split(','):
author_count[c].add(author)
print('pac count')
for comm in pac_disc.keys():
print(comm)
dist_pacs = pac_disc[comm]
u, c = np.unique(dist_pacs, return_counts=True)
print(sum(c))
idx = np.argsort(-c)
"""
Experiment 1: swarm tec correlation
- for various background estimation sizes and artifact keys:
- collect random days
- get dtec prof
- interpolate swarm dne at the profile points
- estimate mean and covariance between the two
"""
import numpy as np
import pandas
from ttools import io, rbf_inversion, swarm, utils, config, convert
LW = 9
def run_experiment(n, bg_est_shape, artifact_key):
start_date = np.datetime64("2014-01-01")
end_date = np.datetime64("2020-01-01")
time_range_days = (end_date - start_date).astype('timedelta64[D]').astype(int)
offsets = np.random.randint(0, time_range_days, n)
dates = start_date + offsets.astype('timedelta64[D]')
x = []
dne = []
mlat_x = []
mlat_dne = []
for date in dates:
_x, _dne, _mlat_x, _mlat_dne = run_day(date, bg_est_shape, artifact_key)
x += _x
dne += _dne
mlat_x += _mlat_x
mlat_dne += _mlat_dne
x = np.concatenate(x, axis=0)
dne = np.concatenate(dne, axis=0)
mlat_x = np.array(mlat_x)
mlat_dne = np.array(mlat_dne)
data = np.column_stack((x, dne))
mean = np.nanmean(data, axis=0)
cov = pandas.DataFrame(data=data).corr().values
mlat_data = np.column_stack((mlat_x, mlat_dne))
mlat_mean = np.nanmean(mlat_data, axis=0)
mlat_cov = pandas.DataFrame(data=mlat_data).cov().values
return mean, cov, mlat_mean, mlat_cov
def run_day(date, bg_est_shape, artifact_key):
print(f"Running {date}")
one_h = np.timedelta64(1, 'h')
start_time = date.astype('datetime64[D]').astype('datetime64[s]')
end_time = start_time + np.timedelta64(1, 'D')
comparison_times = np.arange(start_time, end_time, one_h)
swarm_segments = swarm.get_segments_data(comparison_times)
swarm_troughs = swarm.get_swarm_troughs(swarm_segments)
tec_start = comparison_times[0] - np.floor(bg_est_shape[0] / 2) * one_h
import pandas as pd
from lifelines import KaplanMeierFitter, CoxPHFitter
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from multiprocessing import Pool
import functools
from .correlation import intersection, header_list
import plotly
import plotly.offline as opy
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ShuffleSplit, GridSearchCV
import warnings
#######################
### Sklearn Survival ##
#######################
class EarlyStoppingMonitor:
def __init__(self, window_size, max_iter_without_improvement):
self.window_size = window_size
self.max_iter_without_improvement = max_iter_without_improvement
self._best_step = -1
def __call__(self, iteration, estimator, args):
# continue training for first self.window_size iterations
if iteration < self.window_size:
return False
# compute average improvement in last self.window_size iterations.
# oob_improvement_ is the different in negative log partial likelihood
# between the previous and current iteration.
start = iteration - self.window_size + 1
end = iteration + 1
improvement = np.mean(estimator.oob_improvement_[start:end])
if improvement > 1e-6:
self._best_step = iteration
return False # continue fitting
# stop fitting if there was no improvement
# in last max_iter_without_improvement iterations
diff = iteration - self._best_step
return diff >= self.max_iter_without_improvement
def IPC_RIDGE(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.linear_model import IPCRidge
from sklearn.pipeline import make_pipeline
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
y_train_log = y_train.copy()
y_train_log["time"] = np.log1p(y_train["time"])
y_test_log = y_test.copy()
y_test_log["time"] = np.log1p(y_test["time"])
#https://github.com/sebp/scikit-survival/issues/41
n_alphas = 50
alphas = np.logspace(-10, 1, n_alphas)
gcv = GridSearchCV(IPCRidge(max_iter=100000),
{"alpha":alphas},
cv = 2,
n_jobs=10).fit(X_train,y_train_log)
# GridSearchCV was given IPCRidge directly (no pipeline), so use the refit
# estimator itself; IPCRidge exposes `alpha`, not `alphas_`.
best_model = gcv.best_estimator_
alpha = best_model.alpha
scoreTraining = best_model.score(X_train,y_train_log)
scoreTest = best_model.score(X_test,y_test_log)
feature = pd.DataFrame(best_model.coef_, index=lFeature)[0]
return scoreTraining, scoreTest, feature
def score_survival_model(model, X, y):
from sksurv.metrics import concordance_index_censored
prediction = model.predict(X)
result = concordance_index_censored(y['event'], y['time'], prediction)
return result[0]
def SurvivalSVM(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.svm import FastSurvivalSVM
import numpy as np
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
ssvm = FastSurvivalSVM(max_iter=100, tol=1e-5, random_state=seed)
param_grid = {'alpha': 2. ** np.arange(-12, 13, 4)}
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=seed)
gcv = GridSearchCV(ssvm, param_grid, scoring=score_survival_model,
n_jobs = n_core , refit=False,
cv=cv)
warnings.filterwarnings("ignore", category=FutureWarning)
gcv = gcv.fit(X_train, y_train)
ssvm.set_params(**gcv.best_params_)
ssvm.fit(X_train, y_train)
scoreTraining = ssvm.score(X_train,y_train)
scoreTest = ssvm.score(X_test,y_test)
feature = pd.Series(ssvm.coef_, index=lFeature)
return scoreTraining, scoreTest, feature
def PenaltyCox(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.linear_model import CoxPHSurvivalAnalysis, CoxnetSurvivalAnalysis
from sklearn.pipeline import make_pipeline
seed = np.random.RandomState(seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = CoxnetSurvivalAnalysis(alpha_min_ratio=0.12, l1_ratio=0.9, max_iter=100)
#https://github.com/sebp/scikit-survival/issues/41
model.set_params(max_iter = 100, n_alphas = 50)
model.fit(X_train, y_train)
warnings.simplefilter("ignore", ConvergenceWarning)
alphas = model.alphas_
gcv = GridSearchCV(
make_pipeline(CoxnetSurvivalAnalysis(l1_ratio=0.9, max_iter=1000)),
param_grid={"coxnetsurvivalanalysis__alphas": [[v] for v in alphas]},
cv = 2,
n_jobs= n_core).fit(X_train,y_train)
best_model = gcv.best_estimator_.named_steps["coxnetsurvivalanalysis"]
alpha = best_model.alphas_
scoreTraining = best_model.score(X_train,y_train)
scoreTest = best_model.score(X_test,y_test)
feature = pd.DataFrame(best_model.coef_, index=lFeature)[0]
return scoreTraining, scoreTest, feature
def SurvivalForest(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.ensemble import RandomSurvivalForest
from eli5.formatters import format_as_dataframe
from eli5.sklearn import explain_weights_sklearn
from eli5.sklearn import PermutationImportance
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
rsf = RandomSurvivalForest(n_estimators=300,
min_samples_split=10,
min_samples_leaf=15,
max_features="sqrt",
n_jobs= n_core,
random_state=seed)
rsf.fit(X_train, y_train)
scoreTraining = rsf.score(X_train,y_train)
scoreTest = rsf.score(X_test,y_test)
perm = PermutationImportance(rsf, n_iter=3, random_state=seed)
perm.fit(X_test, y_test)
feature = format_as_dataframe(explain_weights_sklearn(perm, feature_names=lFeature, top = len(lFeature) ))
feature = pd.Series(feature["weight"].tolist(), index=feature["feature"].tolist())
#feature = pd.DataFrame(rsf.feature_importances_, index=lFeature)
return scoreTraining, scoreTest, feature
def gradient_boosted_models(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.ensemble import GradientBoostingSurvivalAnalysis
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
model = GradientBoostingSurvivalAnalysis(
n_estimators=1000, learning_rate=0.05, subsample=0.5,
max_depth=1, random_state=seed
)
monitor = EarlyStoppingMonitor(25, 100)
model.fit(X_train, y_train, monitor=monitor)
scoreTraining = model.score(X_train,y_train)
scoreTest = model.score(X_test,y_test)
feature = pd.Series(model.feature_importances_, index=lFeature)
return scoreTraining, scoreTest, feature
def survival_selection(data, k = 10, topk = 100, event = "event", n_core = 2, seed = 123):
from sksurv.datasets import get_x_y
from sklearn.model_selection import StratifiedKFold
import copy
from miopy.feature_selection import sort_abs
# list of classifiers, selected on the basis of our previous paper "
modelList = [
[gradient_boosted_models,"Gradient Boosted Models"],
[SurvivalSVM,"Support Vector Machine"],
#[SurvivalForest,"Random Forest",],
[PenaltyCox,"Penalized Cox",]
]
print("Loading dataset...")
X, Y = get_x_y(data, attr_labels = [event,"time"], pos_label=0)
skf = StratifiedKFold(n_splits=k, shuffle=True, random_state=np.random.RandomState(seed))
try:
from matplotlib import pyplot as plt
import matplotlib
except:
import matplotlib
matplotlib.rcParams['backend'] = 'TkAgg'
from matplotlib import pyplot as plt
import numpy as np
import pdb
def cornertex(s, ax, offset=(0,0), fontsize=14):
plt.text(0.02+offset[0],0.95+offset[1],s,transform=ax.transAxes,color='k',va='top',ha='left',fontsize=fontsize, zorder=100)
class DataPlt():
'''
Dynamic plot context, intended for displaying geometries.
like removing axes, equal axis, dynamically tune your figure and save it.
Args:
figsize (tuple, default=(6,4)): figure size.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
Attributes:
figsize (tuple, default=(6,4)): figure size.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
ax (Axes): matplotlib Axes instance.
Examples:
with DynamicShow() as ds:
c = Circle([2, 2], radius=1.0)
ds.ax.add_patch(c)
'''
def __init__(self, figsize=(6, 4), filename=None, dpi=300):
self.figsize = figsize
self.filename = filename
self.ax = None
self.fig = None
def __enter__(self):
_setup_mpl()
plt.ion()
self.fig = plt.figure(figsize=self.figsize)
self.ax = plt.gca()
return self
def __exit__(self, *args):
if self.filename is not None:
print('Press `c` to save figure to "%s", `Ctrl+d` to break >>' %
self.filename)
pdb.set_trace()
plt.savefig(self.filename, dpi=300)
else:
pdb.set_trace()
class NoBoxPlt():
'''
Dynamic plot context, intended for displaying geometries.
like removing axes, equal axis, dynamically tune your figure and save it.
Args:
figsize (tuple, default=(6,4)): figure size.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
Attributes:
figsize (tuple, default=(6,4)): figure size.
graph_layout (tuple|None): number of graphs, None for single graph.
filename (filename, str): filename to store generated figure, if None, it will not save a figure.
ax (Axes): matplotlib Axes instance.
Examples:
with DynamicShow() as ds:
c = Circle([2, 2], radius=1.0)
ds.ax.add_patch(c)
'''
def __init__(self, figsize=(6, 4), graph_layout=None, filename=None, dpi=300):
self.figsize = figsize
self.filename = filename
self.ax = None
self.graph_layout = graph_layout
def __enter__(self):
_setup_mpl()
plt.ion()
self.fig = plt.figure(figsize=self.figsize)
if self.graph_layout is None:
self.ax = plt.subplot(111)
else:
self.ax = []
self.gs = plt.GridSpec(*self.graph_layout)
for i in range(self.graph_layout[0]):
for j in range(self.graph_layout[1]):
self.ax.append(plt.subplot(self.gs[i, j]))
return self
def __exit__(self, *args):
axes = [self.ax] if self.graph_layout is None else self.ax
for ax in axes:
ax.axis('equal')
ax.axis('off')
plt.tight_layout()
if self.filename is not None:
print('Press `c` to save figure to "%s", `Ctrl+d` to break >>' %
self.filename)
pdb.set_trace()
plt.savefig(self.filename, dpi=300)
else:
pdb.set_trace()
def _setup_mpl():
'''customize matplotlib.'''
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['axes.titlesize'] = 18
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['figure.titlesize'] = 18
def _setup_font():
myfont = matplotlib.font_manager.FontProperties(
family='wqy', fname='/usr/share/fonts/truetype/wqy/wqy-microhei.ttc')
matplotlib.rcParams["pdf.fonttype"] = 42
return myfont
def visualize_tree(pairs, geometry):
if len(geometry)==2:
xs, ys = np.meshgrid(np.arange(geometry[0]), np.arange(geometry[1]), indexing='ij')
else:
num_bit = geometry[0]
t = np.linspace(0,2*np.pi*(num_bit-1)/num_bit,num_bit)
xs, ys = np.cos(t), np.sin(t)
locs = np.concatenate([xs[...,None], ys[...,None]], axis=-1).reshape([-1,2])
plt.scatter(locs[:,0], locs[:,1], s=80, zorder=101)
for i, loc in enumerate(locs):
plt.text(loc[0], loc[1]-0.2, '%d'%i, fontsize=18, va='center', ha='center')
wl = np.array([p[2] for p in pairs])
w_interval = wl.max()-wl.min()
wl/=w_interval*1.2
wl-=wl.min()-0.01
print(wl)
for (i, j, _), w in zip(pairs, wl):
start, end = locs[i], locs[j]
cmap = plt.get_cmap('jet')
plt.plot([start[0], end[0]], [start[1], end[1]],color=cmap(w*10))
def visualize_tree(pairs, geometry, engine='viznet', offsets=None):
if len(geometry)==2:
xs, ys = np.meshgrid(np.arange(geometry[0]), np.arange(geometry[1]), indexing='ij')
num_bit = np.prod(geometry)
else:
num_bit = geometry[0]
t = np.linspace(0,2*np.pi*(num_bit-1)/num_bit,num_bit)
xs, ys = np.sqrt(num_bit)/2.5*np.cos(t), np.sqrt(num_bit)/2.5*np.sin(t)
locs = np.concatenate([xs[...,None], ys[...,None]], axis=-1).reshape([-1, 2])
#%%
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
import seaborn as sns
import numpy as np
import pickle
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
import sys
sys.path.append("../../src/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
#%%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_colors(colors, inds):
c = [colors[i] for i in inds]
return c
def generate_2d_rotation(theta=0, acorn=None):
if acorn is not None:
np.random.seed(acorn)
R = np.array([
[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]
])
return R
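# Sanity-check sketch: the returned matrix is orthonormal (a pure rotation),
# so R @ R.T recovers the identity for any theta.
#
# R = generate_2d_rotation(np.pi / 4)
# assert np.allclose(R @ R.T, np.eye(2))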
def generate_spirals(N, D=2, K=5, noise = 0.5, acorn = None, density=0.3):
#N number of poinst per class
#D number of features,
#K number of classes
X = []
Y = []
if acorn is not None:
np.random.seed(acorn)
if K == 2:
turns = 2
elif K==3:
turns = 2.5
elif K==5:
turns = 3.5
elif K==7:
turns = 4.5
else:
print ("sorry, can't currently surpport %s classes " %K)
return
mvt = np.random.multinomial(N, 1/K * np.ones(K))
if K == 2:
r = np.random.uniform(0,1,size=int(N/K))
r = np.sort(r)
t = np.linspace(0, np.pi* 4 * turns/K, int(N/K)) + noise * np.random.normal(0, density, int(N/K))
dx = r * np.cos(t)
dy = r* np.sin(t)
X.append(np.vstack([dx, dy]).T )
X.append(np.vstack([-dx, -dy]).T)
Y += [0] * int(N/K)
Y += [1] * int(N/K)
else:
for j in range(1, K+1):
r = np.linspace(0.01, 1, int(mvt[j-1]))
t = np.linspace((j-1) * np.pi *4 *turns/K, j* np.pi * 4* turns/K, int(mvt[j-1])) + noise * np.random.normal(0, density, int(mvt[j-1]))
dx = r * np.cos(t)
dy = r* np.sin(t)
dd = np.vstack([dx, dy]).T
X.append(dd)
#label
Y += [j-1] * int(mvt[j-1])
return np.vstack(X), np.array(Y).astype(int)
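#%%
# Usage sketch (illustrative parameters): draw and plot a 3-class spiral set.
#
# X, Y = generate_spirals(1500, K=3, noise=2.5, acorn=0)
# colors = get_colors(sns.color_palette("Set1", n_colors=3), Y)
# plt.scatter(X[:, 0], X[:, 1], c=colors, s=5)
# plt.show()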
#%%
def experiment(n_spiral3, n_spiral5, n_test, reps, n_trees, max_depth, acorn=None):
#print(1)
if n_spiral3==0 and n_rxor==0:
raise ValueError('Wake up and provide samples to train!!!')
if acorn != None:
np.random.seed(acorn)
errors = np.zeros((reps,4),dtype=float)
for i in range(reps):
l2f = LifeLongDNN()
uf = LifeLongDNN()
#source data
spiral3, label_spiral3 = generate_spirals(n_spiral3, 2, 3, noise = 2.5)
test_spiral3, test_label_spiral3 = generate_spirals(n_test, 2, 3, noise = 2.5)
#target data
spiral5, label_spiral5 = generate_spirals(n_spiral5, 2, 5, noise = 2.5)
test_spiral5, test_label_spiral5 = generate_spirals(n_test, 2, 5, noise = 2.5)
if n_spiral3 == 0:
l2f.new_forest(spiral5, label_spiral5, n_estimators=n_trees,max_depth=max_depth)
errors[i,0] = 0.5
errors[i,1] = 0.5
uf_task2=l2f.predict(test_spiral5, representation=0, decider=0)
l2f_task2=l2f.predict(test_spiral5, representation='all', decider=0)
errors[i,2] = 1 - np.sum(uf_task2 == test_label_spiral5)/n_test
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""RBDL model interface used in priority tasks.
This is based on the implementation in `https://github.com/ADVRHumanoids/ModelInterfaceRBDL`, which is licensed under
the LPGLv3.
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
- [2] "Robot Control for Dummies: Insights and Examples using OpenSoT", Hoffman et al., 2017
- [3] "Rigid Body Dynamics Algorithms", Featherstone, 2008
"""
import numpy as np
import rbdl
from pyrobolearn.priorities.models import ModelInterface
from pyrobolearn.utils.transformation import get_quaternion_from_matrix
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME> (C++)", "<NAME> (C++)", "<NAME> (Python + doc)"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class RBDLModelInterface(ModelInterface):
r"""RBDL Model interface.
"""
def __init__(self, urdf, floating_base=False, verbose=False):
"""
Initialize the RBDL model interface.
Args:
urdf (str): path to the URDF file.
floating_base (bool): set this variable to True, if we have a floating-based robot.
verbose (bool): if True, it will print information when loading the URDF.
"""
# load the RBDL model
model = rbdl.loadModel(filename=urdf.encode(), floating_base=floating_base, verbose=verbose)
# call parent constructor
super(RBDLModelInterface, self).__init__(model)
# define joint attributes
self.zeros = np.zeros(self.model.q_size)
self._q = np.zeros(self.model.q_size)
self._dq = np.zeros(self.model.qdot_size)
self._ddq = np.zeros(self.model.qdot_size)
self.mass = 0
for body_id in range(len(self.model.mBodies)):
body = self.model.mBodies[body_id]
self.mass += body.mMass
self.com = np.zeros(3)
self.com_vel = np.zeros(3)
self.com_acc = np.zeros(3)
self.angular_momentum_com = np.zeros(3)
self.change_angular_momentum_com = np.zeros(3)
##############
# Properties #
##############
@property
def model(self):
"""Return the model instance."""
return self._model
@model.setter
def model(self, model):
"""Set the model instance."""
if not isinstance(model, rbdl.Model):
raise TypeError("Expecting the given 'model' to be an instance of `rbdl.Model`, instead got: "
"{}".format(type(model)))
self._model = model
@property
def num_dofs(self):
"""Return the number of degrees of freedom."""
return self.model.dof_count
###########
# Methods #
###########
def get_link_id(self, link):
"""
Return the link id associated with the given name.
Args:
link (str, int): unique link name (or id). If id, it will just return the argument.
Returns:
int: unique link id
"""
if isinstance(link, (str, bytes)):
if link == 'world' or link == 'ROOT':
link = self.model.GetBodyId('ROOT'.encode())
else:
link = self.model.GetBodyId(link.encode())
if link >= 4294967295:
raise ValueError("The given link doesn't exist in the RBDL model.")
return link
def get_mass(self):
"""
Return the total mass of the model.
Returns:
float: total mass
"""
return self.mass
def has_floating_base(self):
"""
Return True if we have a floating base.
Returns:
bool: True if floating base.
"""
if len(self.model.mBodies) - 1 != self.model.dof_count: # mBodies contains 'ROOT' as well
return True
return False
def get_floating_base_link(self):
"""
Return the floating base link.
Returns:
int: floating base link
"""
pass
def get_joint_positions(self):
"""
Get the joint positions.
Returns:
np.array[float[N]]: the joint positions.
"""
return self._q
def get_joint_velocities(self):
"""
Get the joint velocities.
Returns:
np.array[float[N]]: the joint positions.
"""
return self._dq
def get_joint_accelerations(self):
"""
Get the joint accelerations.
Returns:
np.array[float[N]]: the joint positions.
"""
return self._ddq
def get_com_position(self):
"""
Get the position of the center of mass (CoM).
Returns:
np.array[float[3]]: position of the center of mass
"""
# return rbdl.CalcCenterOfMass(self.model, self._q, self._dq, self._ddq, self.com, self.com_vel, self.com_acc,
# self.angular_momentum_com, self.change_angular_momentum_com,
# update_kinematics=True)
rbdl.CalcCenterOfMass(self.model, self._q, self._dq, self._ddq, self.com) # TODO: update library
return self.com
def get_com_velocity(self):
"""
Get the linear CoM velocity.
Returns:
np.array[float[3]]: CoM velocity.
"""
rbdl.CalcCenterOfMass(self.model, self._q, self._dq, self._ddq, self.com, com_velocity=self.com_vel)
return self.com_vel
def get_com_acceleration(self):
"""
Get the linear CoM acceleration.
Returns:
np.array[float[3]]: CoM acceleration.
"""
rbdl.CalcCenterOfMass(self.model, self._q, self._dq, self._ddq, self.com, com_acceleration=self.com_acc)
return self.com_acc
def get_com_jacobian(self, full=False):
"""
Get the CoM Jacobian.
Args:
full (bool): if True, it will return the jacobian as the concatenation of the angular and linear Jacobian.
Otherwise, it will just return the linear Jacobian.
Returns:
if full:
np.array[float[6,N]]: CoM Jacobian (concatenation of the angular and linear Jacobian, where N is the
number of DoFs)
else:
np.array[float[3,N]]: CoM Jacobian (only the linear part)
"""
jacobian = np.zeros((6, self.num_dofs)) if full else np.zeros((3, self.num_dofs))
mass = 0
for body_id in range(len(self.model.mBodies)):
body = self.model.mBodies[body_id]
jac = np.zeros((3, self.num_dofs))
rbdl.CalcPointJacobian(self.model, self._q, body_id, body.mCenterOfMass, jac, update_kinematics=False)
if full:
jacobian[3:] += body.mMass * jac
else:
jacobian += body.mMass * jac
mass += body.mMass
return jacobian / mass
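# Consistency sketch (hypothetical URDF path): the CoM Jacobian maps joint
# velocities to CoM velocity, so J_com.dot(dq) should match get_com_velocity()
# for the same kinematic state (kinematics may need an explicit
# rbdl.UpdateKinematics call first, since update_kinematics=False here).
#
# model = RBDLModelInterface('robot.urdf', floating_base=False)
# model._q[:] = 0.1
# model._dq[:] = 0.05
# v_com = model.get_com_jacobian().dot(model.get_joint_velocities())
# # v_com ~= model.get_com_velocity()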
def get_gravity(self):
"""
Get the gravity vector applied on the model.
Returns:
np.array[float[3]]: gravity vector expressed in the world frame.
"""
return self.model.gravity
def set_gravity(self, gravity):
"""
Set the gravity vector applied on the model.
Args:
gravity (np.array[float[3]]): gravity vector expressed in the world frame.
"""
self.model.gravity = gravity
def get_model_ordered_joint_names(self):
"""
Get the model ordered joint names.
Returns:
list[str]: list of joint names.
"""
pass
def get_jacobian(self, link, wrt_link=None, point=(0., 0., 0.)): # TODO: wrt_link
r"""
Get the 6D Jacobian for a point on a link, that when multiplied with :math:`\dot{q}` gives a 6D vector that
has the angular velocity as the first three entries and the linear velocity as the last three entries.
.. math:: v = [\omega, \dot{p}] = J(q) \dot{q}
where :math:`J(q)` is the concatenation of the angular and linear Jacobian.
Args:
link (int, str): unique link id, or name.
wrt_link (int, str, None): unique link id, or name. If specified, it will take the relative jacobian. If
None, the jacobian will be taken with respect to the world frame.
point (np.array[float[3]]): position of the point in link's local frame.
Returns:
np.array[float[6,N]]: 6D Jacobian (=concatenation of the angular and linear Jacobian).
"""
link = self.get_link_id(link)
jacobian = np.zeros((6, self.num_dofs))
point = np.asarray(point)
rbdl.CalcPointJacobian6D(self.model, self._q, link, point, jacobian, update_kinematics=False)
return jacobian
def get_pose(self, link, wrt_link=None, point=(0., 0., 0.)):
"""
Return the pose of the specified link with respect to the other given link.
Args:
link (int, str): unique link id, or name.
wrt_link (int, str, None): the other link id, or name. If None, returns the position wrt to the world, and
if -1 wrt to the base.
point (np.array[float[3]]): position of the point in link's local frame.
Returns:
np.array[float[7]]: pose (position and quaternion expressed as [x,y,z,w])
"""
link = self.get_link_id(link)
point = np.asarray(point)
position = rbdl.CalcBodyToBaseCoordinates(self.model, self._q, link, point, update_kinematics=False)
orientation = rbdl.CalcBodyWorldOrientation(self.model, self._q, link, update_kinematics=False)
orientation = get_quaternion_from_matrix(orientation)
return np.concatenate((position, orientation))
def get_velocity_twist(self, link, point=(0., 0., 0.)):
r"""
Compute the angular and linear velocity of a link, given by :math:`v = [\omega, \dot{p}]`.
Args:
link (int, str): unique link id, or name.
point (np.array[float[3]]): position of the point in link's local frame.
Returns:
np.array[float[6]]: The resulting 6D spatial velocity vector where the first three elements are the angular
velocity and the last three are the linear velocity expressed in the global world reference frame.
"""
link = self.get_link_id(link)
point = np.asarray(point)
# -*- coding: utf-8 -*-
"""
chebpy.osf
==========
OSS and OSC class.
"""
import numpy as np
from scipy.fftpack import dst, idst, dct, idct
from scipy.fftpack import fft, ifft, fft2, ifft2, fftn, ifftn
__all__ = ['OSS', # Operator Splitting, Sine basis
'OSC', # Operator Splitting, Cosine basis
'OSF', # Operator splitting, Fourier basis, 1D
'OSF2d', # Operator splitting, Fourier basis, 2D
'OSF3d', # Operator splitting, Fourier basis, 3D
]
class OSS(object):
def __init__(self, Lx, N, Ns, h=None):
'''
:param:Lx: physical size of the 1D spacial grid.
:param:Ns: number of grid points in time.
:param:N: number of grid points in space.
:param:h: time step.
'''
self.Lx = Lx
self.N = N
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.update()
def update(self):
ii = np.arange(self.N+1)
self.x = 1. * ii * self.Lx / self.N
k2 = (np.pi/self.Lx)**2 * np.arange(1, self.N)**2
self.expd = np.exp(-self.h * k2)
def solve(self, w, u0, q=None):
'''
dq/dt = Dq + Wq = Dq - wq
'''
u = u0.copy()
v = u[1:-1] # v = {u[1], u[2], ..., u[N-1]}
expw = np.exp(-0.5 * self.h * w[1:-1])
for i in range(self.Ns-1):
v = expw * v
ak = dst(v, type=1) / self.N * self.expd
v = 0.5 * idst(ak, type=1)
v = expw * v
if q is not None:
q[i+1, 1:-1] = v
u[1:-1] = v
u[0] = 0.; u[-1] = 0.;
return (u, self.x)
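# Usage sketch (illustrative parameters): propagate dq/dt = d2q/dx2 - w(x) q
# on [0, Lx] with homogeneous Dirichlet boundaries (which the sine basis
# enforces: q(0) = q(Lx) = 0).
#
# solver = OSS(Lx=10.0, N=128, Ns=101)
# w = np.zeros(solver.N + 1)                 # free diffusion
# q0 = np.sin(np.pi * solver.x / solver.Lx)  # satisfies the boundary conditions
# q1, x = solver.solve(w, q0)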
class OSC(object):
def __init__(self, Lx, N, Ns, h=None):
'''
:param:Lx: physical size of the 1D spacial grid.
:param:Ns: number of grid points in time.
:param:N: number of grid points in space.
:param:h: time step.
'''
self.Lx = Lx
self.N = N
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.update()
def update(self):
ii = np.arange(self.N+1)
self.x = 1. * ii * self.Lx / self.N
k2 = (np.pi/self.Lx)**2 * np.arange(self.N+1)**2
self.expd = np.exp(-self.h * k2)
def solve(self, w, u0, q=None):
'''
dq/dt = Dq + Wq = Dq - wq
'''
u = u0.copy()
expw = np.exp(-0.5 * self.h * w)
for i in range(self.Ns-1):
u = expw * u
ak = dct(u, type=1) / self.N * self.expd
u = 0.5 * idct(ak, type=1)
u = expw * u
if q is not None:
q[i+1] = u
return (u, self.x)
class OSF(object):
def __init__(self, Lx, N, Ns, h=None):
'''
:param:Lx: physical size of the 1D spacial grid.
:param:N: number of grid points in space.
:param:Ns: number of grid points in time.
:param:h: time step.
'''
self.Lx = Lx
self.N = N
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.update()
def update(self):
Lx = self.Lx
N = self.N
h = self.h
k2 = [i**2 for i in range(N//2+1)] # i=0, 1, ..., N/2
k2.extend([(N-i)**2 for i in range(N//2+1, N)]) # i=N/2+1, ..., N-1
k2 = np.array(k2) * (2*np.pi/Lx)**2
self.expd = np.exp(-h * k2)
def solve(self, w, u0, q=None):
'''
dq/dt = Dq + Wq = Dq - wq
'''
u = u0.copy()
h = self.h
expw = np.exp(-0.5 * h * w)
for i in range(self.Ns-1):
u = expw * u
ak = fft(u) * self.expd
u = ifft(ak).real
u = expw * u
if q is not None:
q[i+1, :] = u
return u
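# Note: the k2 table built in OSF.update follows the standard FFT wavenumber
# ordering; an equivalent construction using numpy's own helper would be:
#
# k = 2 * np.pi * np.fft.fftfreq(N, d=Lx / N)
# k2 = k ** 2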
class OSF2d(object):
def __init__(self, Lx, Ly, Nx, Ny, Ns, h=None):
'''
:param:Lx: physical size of the 1D spacial grid.
:param:N: number of grid points in space.
:param:Ns: number of grid points in time.
:param:h: time step.
'''
self.Lx = Lx
self.Ly = Ly
self.Nx = Nx
self.Ny = Ny
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.update()
def update(self):
Lx = self.Lx
Ly = self.Ly
Nx = self.Nx
Ny = self.Ny
h = self.h
ccx = (2*np.pi/Lx)**2
ccy = (2*np.pi/Ly)**2
k2 = np.zeros((Nx,Ny))
for i in range(Nx):
    for j in range(Ny):
if i < Nx/2+1:
kx2 = i**2
else:
kx2 = (Nx-i)**2
if j < Ny/2+1:
ky2 = j**2
else:
ky2 = (Ny-j)**2
k2[i,j] = ccx * kx2 + ccy * ky2
self.expd = np.exp(-h * k2)
def solve(self, w, u0, q=None):
'''
dq/dt = Dq + Wq = Dq - wq
'''
u = u0.copy()
h = self.h
expw = np.exp(-0.5 * h * w)
for i in range(self.Ns-1):
u = expw * u
ak = fft2(u) * self.expd
u = ifft2(ak).real
u = expw * u
if q is not None:
q[i+1] = u
return u
class OSF3d(object):
def __init__(self, Lx, Ly, Lz, Nx, Ny, Nz, Ns, h=None):
'''
:param:Lx: physical size of the 1D spacial grid.
:param:N: number of grid points in space.
:param:Ns: number of grid points in time.
:param:h: time step.
'''
self.Lx = Lx
self.Ly = Ly
self.Lz = Lz
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
self.Ns = Ns
if h is None:
self.h = 1. / (Ns - 1)
else:
self.h = h
self.update()
def update(self):
Lx = self.Lx
Ly = self.Ly
Lz = self.Lz
Nx = self.Nx
Ny = self.Ny
Nz = self.Nz
h = self.h
ccx = (2*np.pi/Lx)**2
ccy = (2*np.pi/Ly)**2
ccz = (2*np.pi/Lz)**2
k2 = np.zeros((Nx,Ny,Nz))
""" A module containing LCPrimitive and its subclasses. They implement
components of a pulsar light curve. Includes primitives (Gaussian,
Lorentzian), etc. as well as more sophisticated holistic templates that
provide single-parameter (location) representations of the light curve.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/pulsar/lcprimitives.py,v 1.35 2017/03/17 21:37:52 kerrm Exp $
author: <NAME> <<EMAIL>>
"""
# NB -- big TODO -- I don't think wrapped primitives quite correctly return
# Monte Carlo variables because they don't account for the uniform approx.
# perhaps this isn't a big deal
import numpy as np
from scipy.special import erf,i0,i1
from scipy.integrate import simps,quad
from scipy.interpolate import interp1d
from scipy.stats import norm,cauchy
from math import sin,cos,sinh,cosh,atan,tan
ROOT2PI = (2*np.pi)**0.5
R2DI = (2/np.pi)**0.5
ROOT2 = 2**0.5
TWOPI = (2*np.pi)
PI = np.pi*1
MAXWRAPS = 15
MINWRAPS = 3
WRAPEPS = 1e-8
# TODO -- possible "LCBase" class with certain method common to LCPrimitive and LCTemplate
def two_comp_mc(n,w1,w2,loc,func):
""" Helper function to generate MC photons from a two-sided
distribution.
NB -- this should work as is if w1,w2,loc are vectors.
n -- total number of photons
w1 -- scale parameter for func, lefthand peak
w2 -- scale parameter for func, righthand peak
loc -- position of peak
func -- an 'rvs' function from scipy
"""
frac1 = w1/(w1+w2)
# number of photons required from left side
n1 = (np.random.rand(n) < frac1).sum()
r1 = func(loc=0,scale=w1,size=n1)
    # reflect and relocate photons to right or left side
r1 = loc + np.where(r1<=0,r1,-r1)
r2 = func(loc=0,scale=w2,size=n-n1)
r2 = loc + np.where(r2>0,r2,-r2)
return np.mod(np.append(r1,r2),1)
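# Example (illustrative, not from the original module): draw 1000 phases from
# an asymmetric Gaussian peak at phase 0.4 with left/right widths 0.02 and 0.05:
#     phases = two_comp_mc(1000, 0.02, 0.05, 0.4, norm.rvs)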
def approx_gradient(func,phases,log10_ens,eps=1e-6):
""" Return a numerical gradient. This works for both LCPrimitive and
LCTemplate objects. HOW AWESOME!
"""
orig_p = func.get_parameters(free=False).copy()
g = np.zeros([len(orig_p),len(phases)])
weights = np.asarray([-1,8,-8,1])/(12*eps)
def do_step(which,eps):
p0 = orig_p.copy()
p0[which] += eps
func.set_parameters(p0,free=False)
return func(phases,log10_ens)
for i in range(len(orig_p)):
# use a 4th-order central difference scheme
for j,w in zip([2,1,-1,-2],weights):
g[i,:] += w*do_step(i,j*eps)
func.set_parameters(orig_p,free=False)
return g
def check_gradient(func,atol=1e-8,rtol=1e-5,quiet=False):
""" Test gradient function with a set of MC photons.
This works with either LCPrimitive or LCTemplate objects.
    TODO -- there is trouble with the numerical gradient for the
    location-related parameters when the finite step causes the peak
    to shift from one side of an evaluation phase to the other."""
en = np.random.rand(1000)*2 + 1 # 100 MeV to 10 GeV
ph = func.random(en)
if hasattr(func,'closest_to_peak'):
eps = min(1e-6,0.2*func.closest_to_peak(ph))
else:
eps = 1e-6
g1 = func.gradient(ph,en,free=False)
g2 = func.approx_gradient(ph,en,eps=eps)
anyfail = False
for i in range(g1.shape[0]):
d1 = np.abs(g1[i]-g2[i])
a = np.argmax(d1)
        fail = np.any(d1 > (atol + rtol*np.abs(g2[i])))
if not quiet:
pass_string = 'FAILED' if fail else 'passed'
print ('%02d (%s) %.3g (abs)'%(i,pass_string,d1.max()))
anyfail = anyfail or fail
return not anyfail
class Fittable(object):
# TODO
""" Base class for any object with fittable parameters.
Handle parameter names, etc. here?"""
def get_parameters(self): pass
def set_parameters(self): pass
def get_bounds(self): pass
def __call__(self,*args):
return self._call(*args)[self.free]
def _call(self,*args):
raise NotImplementedError('Child class instantiates.')
def grad(self,*args):
return self._grad(*args)[self.free]
def _grad(self,*args):
raise NotImplementedError('Child class instantiates.')
class LCPrimitive(object):
""" Base class for various components of a light curve. All "analytic"
light curve models must inherit and must implement the three
'virtual' functions below."""
def is_energy_dependent(self):
return False
def is_two_sided(self):
""" True if primitive is asymmetric. Default is False, two-sided
child classes should override."""
return False
def copy(self):
from copy import deepcopy
return deepcopy(self)
def __call__(self,phases):
raise NotImplementedError('Virtual function must be implemented by child class.')
def integrate(self,x1=0,x2=1,log10_ens=3):
""" Base implemention with scipy quad."""
f = lambda ph: self(ph,log10_ens)
return quad(f,x1,x2)[0]
def cdf(self,x,log10_ens=3):
return self.integrate(x1=0,x2=x,log10_ens=3)
def fwhm(self):
"""Return the full-width at half-maximum of the light curve model."""
return self.hwhm(0)+self.hwhm(1)
def hwhm(self,right=False):
"""Return the half-width at half-maximum of the light curve model."""
raise NotImplementedError('Virtual function must be implemented by child class.')
def init(self):
self.p = np.asarray([1])
self.pnames = []
self.name = 'Default'
self.shortname = 'None'
def _asarrays(self):
for key in ['p','free','bounds','errors','slope','slope_free']:
if hasattr(self,key):
v = self.__dict__[key]
if v is not None:
self.__dict__[key] = np.asarray(v,dtype=bool if 'free' in key else float)
def _default_bounds(self):
bounds = [[]] *len(self.p)
# this order works for LCHarmonic, too
bounds[0] = [0.005,0.5] # width
bounds[-1] = [-1,1] # position
if len(bounds) > 2:
bounds[1] = [0.005,0.5] # width
return bounds
def _default_priors(self):
loc = self.p.copy()
width = np.asarray([0.1]*len(self.p))
enable = np.asarray([False]*len(self.p))
return loc,width,enable
def __init__(self,**kwargs):
""" Generally, class-specific setup work is performed in init.
Here, init is called and certain guaranteed default members
are established."""
self.init()
if not hasattr(self,'bounds'):
self.bounds = self._default_bounds() # default
self.errors = np.zeros_like(self.p)
self.free = np.asarray([True]*len(self.p))
self.__dict__.update(kwargs)
self._asarrays()
self.gauss_prior_loc, self.gauss_prior_width, self.gauss_prior_enable = self._default_priors()
self.shift_mode = False
def _make_p(self,log10_ens=3):
""" Internal method to return parameters appropriate for use
in functional form."""
return [None] + list(self.p)
def set_parameters(self,p,free=True):
if free:
self.p[self.free] = p
else:
self.p[:] = p
# adjust position to be between 0 and 1
self.p[-1] = self.p[-1] % 1
return np.all(self.p >= 0)
def get_parameters(self,free=True):
if free:
return self.p[self.free]
return self.p
def get_parameter_names(self,free=True):
return [p for (p,b) in zip(self.pnames,self.free) if b]
def set_errors(self,errs):
n = self.free.sum()
self.errors[:] = 0.
self.errors[self.free] = errs[:n]
return n
def get_errors(self,free=True):
return self.errors[self.free]
def get_bounds(self): return self.bounds[self.free]
def get_gauss_prior_parameters(self):
mod_array = [False]*(len(self.p)-1)+[True]
return (
self.gauss_prior_loc[self.free],
self.gauss_prior_width[self.free],
np.asarray(mod_array)[self.free],
self.gauss_prior_enable[self.free],
)
def enable_gauss_prior(self,enable=True):
""" [Convenience] Turn on gaussian prior."""
self.gauss_prior_enable[:] = enable
def center_gauss_prior(self,enable=False):
""" [Convenience] Set gauss mode to current params."""
self.gauss_prior_loc[:] = self.p[:]
if enable: self.enable_gauss_prior()
def get_location(self,error=False):
if error: return np.asarray([self.p[-1],self.errors[-1]])
return self.p[-1]
def set_location(self,loc):
self.p[-1] = loc
def get_norm(self,error=False):
#if error: return np.asarray([self.p[0],self.errors[0]])
#return self.p[0]
return 1
def get_width(self,error=False,hwhm=False,right=False):
""" Return the width of the distribution.
Keyword arguments:
-----------------
error [False] if True, return tuple with value and error
hwhm [False] if True, scale width to be HWHM
right [False] if True, return "right" component, else "left".
There is no distinction for symmetric dists.
"""
scale = self.hwhm(right=right)/self.p[int(right)] if hwhm else 1
if error: return np.asarray([self.p[int(right)],self.errors[int(right)]])*scale
return self.p[int(right)]*scale
def get_gradient(self,phases,log10_ens=3):
raise DeprecationWarning()
return self.gradient(phases,log10_ens,free=True)
def gradient(self,phases,log10_ens=3,free=False):
raise NotImplementedError('No gradient function found for this object.')
def random(self,n):
""" Default is accept/reject."""
if n < 1: return 0
M = self(np.asarray([self.p[-1]])) # peak amplitude
rvals = np.empty(n)
position = 0
rfunc = np.random.rand
while True:
cand_phases = rfunc(n)
cand_phases = cand_phases[rfunc(n) < self(cand_phases)/M]
ncands = len(cand_phases)
if ncands == 0: continue
rvals[position:position + ncands] = cand_phases[:n-position]
position += ncands
if position >= n: break
return rvals
def __str__(self):
m=max([len(n) for n in self.pnames])
l = []
errors = self.errors if hasattr(self,'errors') else [0]*len(self.pnames)
for i in range(len(self.pnames)):
fstring = '' if self.free[i] else ' [FIXED]'
n=self.pnames[i][:m]
t_n = n+(m-len(n))*' '
            l += [t_n + ': %.4f +/- %.4f%s'%(self.p[i],errors[i],fstring)]
l = [self.name+'\n------------------'] + l
return '\n'.join(l)
def approx_gradient(self,phases,log10_ens=3,eps=1e-5):
return approx_gradient(self,phases,log10_ens,eps=eps)
def check_gradient(self,atol=1e-8,rtol=1e-5,quiet=False):
return check_gradient(self,atol=atol,rtol=rtol,quiet=quiet)
def sanity_checks(self,eps=1e-6):
""" A few checks on normalization, integration, etc. """
errfac = 1
# Normalization test
y,ye = quad(self,0,1)
#t1 = abs(self.p[0]-y)<(ye*errfac)
t1 = abs(1-y)<(ye*errfac)
# integrate method test
#t2 = abs(self.p[0]-self.integrate(0,1))<eps
t2 = abs(1-self.integrate(0,1))<eps
# FWHM test
t3 = (self(self.p[-1])*0.5-self(self.p[-1]-self.fwhm()/2))<eps
# gradient test
try:
t4 = self.check_gradient(quiet=True)
except: t4 = False
# boundary conditions
t5 = abs(self(0)-self(1-eps))<eps
if not t1: print ('Failed Normalization test')
if not t2: print ('Failed integrate method test')
if not t3: print ('Failed FWHM test')
if not t4: print ('Failed gradient test')
if not t5: print ('Did not pass boundary conditions')
return np.all([t1,t2,t3,t4,t5])
def eval_string(self):
""" Return a string that can be evaled to instantiate a nearly-
identical object."""
return '%s(p=%s,free=%s,slope=%s,slope_free=%s)'%(
self.__class__.__name__,str(list(self.p)),str(list(self.free)),
str(list(self.slope)) if hasattr(self,'slope') else None,
str(list(self.slope_free)) if hasattr(self,'slope_free') else None)
def dict_string(self):
""" Return a string to express the object as a dictionary that can
be easily instantiated using its keys."""
def pretty_list(l,places=5):
fmt = '%.'+'%d'%places+'f'
s = ', '.join([fmt%x for x in l])
return '['+s+']'
t = ['name = %s'%self.__class__.__name__,
'p = %s'%(pretty_list(self.p)),
'free = %s'%(str(list(self.free))),
'slope = %s'%(pretty_list(self.slope) if hasattr(self,'slope') else None),
'slope_free = %s'%(str(list(self.slope_free)) if hasattr(self,'slope_free') else None),
]
#return 'dict(\n'+'\n '.join(t)+'\n
return t
def closest_to_peak(self,phases):
""" Return the minimum distance between a member of the array of
phases and the position of the mode of the primitive."""
return np.abs(phases-self.get_location()).min()
def get_fixed_energy_version(self,log10_en=3):
return self
class LCWrappedFunction(LCPrimitive):
""" Super-class for profiles derived from wrapped functions.
While some distributions (e.g. the wrapped normal) converge
quickly, others (e.g. the wrapped Lorentzian) converge very slowly
and must be truncated before machine precision is reached.
        In order to preserve normalization, the pdf is slightly adjusted:
            f(phi) = sum_{i=-N..N} g(phi+i) + (1 - int_{-N}^{N+1} g(x) dx).
        This introduces an additional parametric dependence which must
        be accounted for by computation of the gradient.
"""
def _norm(self,nwraps,log10_ens=3):
""" Compute the truncated portion of the template."""
#return self.p[0]-self.base_int(-nwraps,nwraps+1)
return 1-self.base_int(-nwraps,nwraps+1,log10_ens)
def _grad_norm(self,nwraps,log10_ens=3):
""" Compute the gradient terms due to truncated portion. That is,
since we add on a uniform component beyond nwraps, the
amplitude of this component depends on the CDF and hence on
the parameters.
Default implementation is to ignore these terms, applicable
for rapidly-converging distributions (e.g. wrapped normal with
small width parameter). On the other hand, it is not
negligible for long-tailed distributions, e.g. Lorentzians."""
return None
def __call__(self,phases,log10_ens=3):
""" Return wrapped template + DC component corresponding to truncation."""
results = self.base_func(phases,log10_ens)
for i in range(1,MAXWRAPS+1):
t = self.base_func(phases,log10_ens,index= i)
t += self.base_func(phases,log10_ens,index=-i)
results += t
if (i>=MINWRAPS) and (np.all(t < WRAPEPS)): break
return results+self._norm(i,log10_ens)
def gradient(self,phases,log10_ens=3,free=False):
""" Return the gradient evaluated at a vector of phases.
output : a num_parameter x len(phases) ndarray,
the num_parameter-dim gradient at each phase
"""
results = self.base_grad(phases,log10_ens)
for i in range(1,MAXWRAPS+1):
t = self.base_grad(phases,log10_ens,index=i)
t += self.base_grad(phases,log10_ens,index=-i)
results += t
if (i >= MINWRAPS) and (np.all(t < WRAPEPS)): break
gn = self._grad_norm(i,log10_ens)
if gn is not None:
for i in range(len(gn)):
results[i,:] += gn[i]
if free:
return results[self.free]
return results
def integrate(self,x1,x2,log10_ens=3):
#if(x1==0) and (x2==0): return 1.
# NB -- this method is probably overkill now.
results = self.base_int(x1,x2,log10_ens,index=0)
for i in range(1,MAXWRAPS+1):
t = self.base_int(x1,x2,log10_ens,index=i)
t += self.base_int(x1,x2,log10_ens,index=-i)
results += t
if np.all(t < WRAPEPS):
break
return results+(x2-x1)*self._norm(i,log10_ens)
def base_func(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_func function found for this object.')
def base_grad(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_grad function found for this object.')
def base_int(self,phases,log10_ens=3,index=0):
raise NotImplementedError(
'No base_int function found for this object.')
class LCGaussian(LCWrappedFunction):
""" Represent a (wrapped) Gaussian peak.
Parameters
Width the standard deviation parameter of the norm dist.
Location the mode of the Gaussian distribution
"""
def init(self):
self.p = np.asarray([0.03,0.5])
self.pnames = ['Width','Location']
self.name = 'Gaussian'
self.shortname = 'G'
def hwhm(self,right=False):
return self.p[0]*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
return (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
def base_grad(self,phases,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z = (phases + index - x0)/width
f = (1./(width*ROOT2PI))*np.exp(-0.5*z**2 )
return np.asarray([f/width*(z**2 - 1.),f/width*z])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,width,x0 = self._make_p(log10_ens)
z1 = (x1 + index - x0)/width
z2 = (x2 + index - x0)/width
return 0.5*(erf(z2/ROOT2)-erf(z1/ROOT2))
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(norm.rvs(loc=self.p[-1],scale=self.p[0],size=n),1)
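# Example (illustrative, not from the original module): build a wrapped
# Gaussian peak at phase 0.5 with width 0.03 and exercise the base API:
#     g = LCGaussian(p=[0.03, 0.5])
#     vals = g(np.linspace(0, 1, 5))   # pdf evaluated on a phase grid
#     ok = g.sanity_checks()           # normalization, FWHM, gradient tests
#     mc = g.random(1000)              # 1000 wrapped MC phases in [0, 1)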
class LCGaussian2(LCWrappedFunction):
""" Represent a (wrapped) two-sided Gaussian peak.
Parameters
Width1 the standard deviation parameter of the norm dist.
Width2 the standard deviation parameter of the norm dist.
Location the mode of the distribution
"""
def init(self):
self.p = np.asarray([0.03,0.03,0.5])
self.pnames = ['Width1','Width2','Location']
self.name = 'Gaussian2'
self.shortname = 'G2'
def is_two_sided(self):
return True
def hwhm(self,right=False):
return (self.p[int(right)])*(2 * np.log(2))**0.5
def base_func(self,phases,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
z *= np.where(z <= 0, 1./width1, 1./width2)
return (R2DI/(width1+width2)) * np.exp(-0.5*z**2 )
def base_grad(self,phases,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
m = (z <= 0)
w = np.where(m, width1, width2)
z /= w
f = (R2DI/(width1+width2)) * np.exp(-0.5*z**2 )
k = 1./(width1+width2)
z2w = z**2/w
t = f*(z2w-k)
g1 = f*(z2w*( m)-k)
g2 = f*(z2w*(~m)-k)
g3 = f*z/w
return np.asarray([g1,g2,g3])
def base_int(self,x1,x2,log10_ens=3,index=0):
e,width1,width2,x0 = self._make_p(log10_ens)
if index==0 and (x1 < x0) and (x2 > x0):
z1 = (x1 + index - x0)/width1
z2 = (x2 + index - x0)/width2
k1 = 2*width1/(width1+width2)
k2 = 2*width2/(width1+width2)
return 0.5*(k2*erf(z2/ROOT2)-k1*erf(z1/ROOT2))
w = width1 if ((x1+index) < x0) else width2
z1 = (x1 + index - x0)/w
z2 = (x2 + index - x0)/w
k = 2*w/(width1+width2)
return 0.5*k*(erf(z2/ROOT2)-erf(z1/ROOT2))
def random(self,n):
""" Use multinomial technique to return random photons from
both components."""
if hasattr(n,'__len__'):
n = len(n)
return two_comp_mc(n,self.p[0],self.p[1],self.p[-1],norm.rvs)
class LCLorentzian(LCPrimitive):
""" Represent a (wrapped) Lorentzian peak.
Parameters
        Width     the width parameter of the wrapped Cauchy distribution,
                  namely HWHM*2PI for narrow distributions
Location the center of the peak in phase
"""
def init(self):
self.p = np.asarray([0.1,0.5])
self.pnames = ['Width','Location']
self.name = 'Lorentzian'
self.shortname = 'L'
def hwhm(self,right=False):
# NB -- bounds on p[1] set such that this is well-defined
return np.arccos( 2-cosh(self.p[0]) )/TWOPI
def __call__(self,phases,log10_ens=3):
e,gamma,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
# NB -- numpy call not as efficient as math.sinh etc.
# but this allows easy inheritance for the energy-dependence
return np.sinh(gamma)/(np.cosh(gamma)-np.cos(z))
def gradient(self,phases,log10_ens=3,free=False):
e,gamma,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
s1 = np.sinh(gamma); c1 = np.cosh(gamma)
c = np.cos(z); s = np.sin(z)
f = s1/(c1-c)
f2 = f**2
g1 = f*(c1/s1) - f2
g2 = f2*(TWOPI/s1)*s
if free:
return np.asarray([g1,g2])[self.free]
return np.asarray([g1,g2])
def random(self,n):
if hasattr(n,'__len__'):
n = len(n)
return np.mod(cauchy.rvs(loc=self.p[-1],scale=self.p[0]/TWOPI,size=n),1)
def integrate(self,x1,x2,log10_ens=3):
# NB -- due to the use of tans below, must be careful to use an angle
# range of -pi/2 to pi/2 rather than 0 to pi as one would want
# I haven't carefully tested this solution
e,gamma,loc = self._make_p(log10_ens)
x1 = PI*(x1-loc)
x2 = PI*(x2-loc)
t = 1./np.tanh(0.5*gamma) # coth(gamma/2)
v2 = np.arctan(t*tan(x2))/PI
v1 = np.arctan(t*tan(x1))/PI
return (v2<=v1) + v2 - v1 # correction for tan wrapping
class LCLorentzian2(LCWrappedFunction):
""" Represent a (wrapped) two-sided Lorentzian peak.
Parameters
Width1 the HWHM of the distribution (left)
Width2 the HWHM of the distribution (right)
Location the mode of the distribution
"""
def init(self):
self.p = np.asarray([0.03,0.03,0.5])
self.pnames = ['Width1','Width2','Location']
self.name = 'Lorentzian2'
self.shortname = 'L2'
def is_two_sided(self):
return True
def hwhm(self,right=False):
return self.p[int(right)]
def _grad_norm(self,nwraps,log10_ens=3):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z1 = (-nwraps-x0)/gamma1
z2 = (nwraps+1-x0)/gamma2
t = gamma2*np.arctan(z2)-gamma1*np.arctan(z1)
t1 = 1./(1+z1**2)
t2 = 1./(1+z2**2)
k = 2/(gamma1+gamma2)/PI
f = k*t
g1 = -1./(gamma1+gamma2)-(np.arctan(z1)-z1*t1)/t
g2 = -1./(gamma1+gamma2)+(np.arctan(z2)-z2*t2)/t
g3 = (t1-t2)/t
return [-f*g1,-f*g2,-f*g3]
def base_func(self,phases,log10_ens=3,index=0):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
z *= np.where(z<=0, 1./gamma1, 1./gamma2)
k = 2/(gamma1+gamma2)/PI
return k/(1+z**2)
def base_grad(self,phases,log10_ens=3,index=0):
e,gamma1,gamma2,x0 = self._make_p(log10_ens)
z = (phases + (index - x0))
m = z < 0
g = np.where(m,1./gamma1,1./gamma2)
t1 = 1+(z*g)**2
t2 = 2*(z*g)/t1
g1 = -1/(gamma1+gamma2)+t2*((m*z)/gamma1**2)
g2 = -1/(gamma1+gamma2)+t2*((~m*z)/gamma2**2)
g3 = t2*g
f = (2./(gamma1+gamma2)/PI)/t1
return np.asarray([f*g1,f*g2,f*g3])
def base_int(self,x1,x2,log10_ens=3,index=0):
gamma1,gamma2,x0 = self.p
# the only case where g1 and g2 can be different is if we're on the
# 0th wrap, i.e. index=0; this also includes the case when we want
# to use base_int to do a "full" integral
if index==0 and (x1 < x0) and (x2 > x0):
g1,g2 = gamma1,gamma2
else:
g1,g2 = [gamma1]*2 if ((x1+index) < x0) else [gamma2]*2
z1 = (x1 + index - x0)/g1
z2 = (x2 + index - x0)/g2
k = (2./(gamma1+gamma2)/PI)
return k*(g2*atan(z2)-g1*atan(z1))
def random(self,n):
""" Use multinomial technique to return random photons from
both components."""
return two_comp_mc(n,self.p[0],self.p[1],self.p[-1],cauchy.rvs)
class LCVonMises(LCPrimitive):
""" Represent a peak from the von Mises distribution. This function is
used in directional statistics and is naturally wrapped.
Parameters:
Width inverse of the 'kappa' parameter in the std. def.
Location the center of the peak in phase
"""
def init(self):
self.p = np.asarray([0.05,0.5])
self.pnames = ['Width','Location']
self.name = 'VonMises'
self.shortname = 'VM'
def hwhm(self,right=False):
return 0.5*np.arccos(self.p[0]*np.log(0.5)+1)/TWOPI
def __call__(self,phases,log10_ens=3):
e,width,loc = self._make_p(log10_ens)
z = TWOPI*(phases-loc)
return np.exp(np.cos(z)/width)/i0(1./width)
def gradient(self,phases,log10_ens=3,free=False):
e,width,loc = self._make_p(log10_ens)
my_i0 = i0(1./width)
my_i1 = i1(1./width)
z = TWOPI*(phases-loc)
cz = np.cos(z)
sz = np.sin(z)
        # pdf: f = exp(cz/width)/I0(1/width), as in __call__; differentiate
        # with respect to width and location
        f = np.exp(cz/width)/my_i0
        return np.asarray([(my_i1/my_i0 - cz)/width**2*f,(TWOPI*sz/width)*f])
class LCKing(LCWrappedFunction):
""" Represent a (wrapped) King function peak.
Parameters
Sigma the width parameter
Gamma the tail parameter
Location the mode of the distribution
"""
# NOTES -- because we don't integrate over solid angle, the norm
    # integral / Jacobian for the usual King function isn't trivial;
# need to see if this is a show stopper
def init(self):
self.p = | np.asarray([0.03,0.5]) | numpy.asarray |
import torch
import numpy as np
import pickle
def h36m_valid_angle_check(p3d):
"""
p3d: [bs,16,3] or [bs,48]
"""
if p3d.shape[-1] == 48:
p3d = p3d.reshape([p3d.shape[0], 16, 3])
cos_func = lambda p1, p2: np.sum(p1 * p2, axis=1) / np.linalg.norm(p1, axis=1) / np.linalg.norm(p2, axis=1)
data_all = p3d
valid_cos = {}
# Spine2LHip
p1 = data_all[:, 3]
p2 = data_all[:, 6]
cos_gt_l = np.sum(p1 * p2, axis=1) / np.linalg.norm(p1, axis=1) / np.linalg.norm(p2, axis=1)
# Spine2RHip
p1 = data_all[:, 0]
p2 = data_all[:, 6]
cos_gt_r = np.sum(p1 * p2, axis=1) / np.linalg.norm(p1, axis=1) / np.linalg.norm(p2, axis=1)
valid_cos['Spine2Hip'] = np.vstack((cos_gt_l, cos_gt_r))
# LLeg2LeftHipPlane
p0 = data_all[:, 3]
p1 = data_all[:, 4] - data_all[:, 3]
p2 = data_all[:, 5] - data_all[:, 4]
n0 = np.cross(p0, p1)
cos_gt_l = np.sum(n0 * p2, axis=1) / np.linalg.norm(n0, axis=1) / np.linalg.norm(p2, axis=1)
# RLeg2RHipPlane
p0 = data_all[:, 0]
p1 = data_all[:, 1] - data_all[:, 0]
p2 = data_all[:, 2] - data_all[:, 1]
n0 = np.cross(p1, p0)
cos_gt_r = np.sum(n0 * p2, axis=1) / np.linalg.norm(n0, axis=1) / np.linalg.norm(p2, axis=1)
valid_cos['Leg2HipPlane'] = np.vstack((cos_gt_l, cos_gt_r))
# Shoulder2Hip
p1 = data_all[:, 10] - data_all[:, 7]
p2 = data_all[:, 3]
cos_gt_l = np.sum(p1 * p2, axis=1) / np.linalg.norm(p1, axis=1) / np.linalg.norm(p2, axis=1)
p1 = data_all[:, 13] - data_all[:, 7]
p2 = data_all[:, 0]
cos_gt_r = np.sum(p1 * p2, axis=1) / np.linalg.norm(p1, axis=1) / np.linalg.norm(p2, axis=1)
valid_cos['Shoulder2Hip'] = np.vstack((cos_gt_l, cos_gt_r))
# Leg2ShoulderPlane
p0 = data_all[:, 13]
p1 = data_all[:, 10]
p2 = data_all[:, 4]
p3 = data_all[:, 1]
n0 = np.cross(p0, p1)
cos_gt_l = np.sum(n0 * p2, axis=1) / np.linalg.norm(n0, axis=1) / np.linalg.norm(p2, axis=1)
cos_gt_r = np.sum(n0 * p3, axis=1) / np.linalg.norm(n0, axis=1) / np.linalg.norm(p3, axis=1)
valid_cos['Leg2ShoulderPlane'] = np.vstack((cos_gt_l, cos_gt_r))
# Shoulder2Shoulder
p0 = data_all[:, 13] - data_all[:, 7]
p1 = data_all[:, 10] - data_all[:, 7]
cos_gt = np.sum(p0 * p1, axis=1) / np.linalg.norm(p0, axis=1) / np.linalg.norm(p1, axis=1)
valid_cos['Shoulder2Shoulder'] = cos_gt
# Neck2Spine
p0 = data_all[:, 7] - data_all[:, 6]
p1 = data_all[:, 6]
cos_gt = np.sum(p0 * p1, axis=1) / np.linalg.norm(p0, axis=1) / np.linalg.norm(p1, axis=1)
valid_cos['Neck2Spine'] = cos_gt
# Spine2HipPlane1
p0 = data_all[:, 3]
p1 = data_all[:, 4] - data_all[:, 3]
n0 = np.cross(p1, p0)
p2 = data_all[:, 6]
n1 = np.cross(p2, n0)
cos_dir_l = np.sum(p0 * n1, axis=1) / np.linalg.norm(p0, axis=1) / np.linalg.norm(n1, axis=1)
cos_gt_l = np.sum(n0 * p2, axis=1) / np.linalg.norm(n0, axis=1) / np.linalg.norm(p2, axis=1)
p0 = data_all[:, 0]
p1 = data_all[:, 1] - data_all[:, 0]
n0 = np.cross(p0, p1)
p2 = data_all[:, 6]
n1 = np.cross(n0, p2)
cos_dir_r = np.sum(p0 * n1, axis=1) / np.linalg.norm(p0, axis=1) / np.linalg.norm(n1, axis=1)
cos_gt_r = np.sum(n0 * p2, axis=1) / np.linalg.norm(n0, axis=1) / np.linalg.norm(p2, axis=1)
cos_gt_l1 = np.ones_like(cos_gt_l) * 0.5
cos_gt_r1 = np.ones_like(cos_gt_r) * 0.5
cos_gt_l1[cos_dir_l < 0] = cos_gt_l[cos_dir_l < 0]
cos_gt_r1[cos_dir_r < 0] = cos_gt_r[cos_dir_r < 0]
valid_cos['Spine2HipPlane1'] = np.vstack((cos_gt_l1, cos_gt_r1))
# Spine2HipPlane2
    cos_gt_l2 = np.ones_like(cos_gt_l) * 0.5
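# Example (illustrative; assumes the full function, which is truncated here,
# ends by returning the `valid_cos` dict of per-joint cosine checks):
#     poses = np.random.randn(8, 16, 3)
#     cos_dict = h36m_valid_angle_check(poses)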
from __future__ import division
from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, norm
from collections import OrderedDict
from numpy.random import randint
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator, MaxNLocator, LinearLocator, FixedLocator
from decimal import Decimal
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams, rcdefaults
import sys
import seaborn as sns
import pandas as pd
import numpy as np
import warnings
# These have been placed in separate .py files for reduced code clutter.
from .mpl_tools import rotateTicks, normalizeSwarmY, normalizeContrastY, offsetSwarmX, resetSwarmX, getSwarmSpan
from .mpl_tools import align_yaxis, halfviolin, drawback_y, drawback_x
from .bootstrap_tools import ci, bootstrap, bootstrap_contrast, bootstrap_indexes, jackknife_indexes, getstatarray, bca
from .plot_bootstrap_tools import plotbootstrap, plotbootstrap_hubspoke, swarmsummary
def pairedcontrast(data, x, y, idcol, reps = 3000,
statfunction = None, idx = None, figsize = None,
beforeAfterSpacer = 0.01,
violinWidth = 0.005,
floatOffset = 0.05,
showRawData = False,
showAllYAxes = False,
floatContrast = True,
smoothboot = False,
floatViolinOffset = None,
showConnections = True,
summaryBar = False,
contrastYlim = None,
swarmYlim = None,
barWidth = 0.005,
rawMarkerSize = 8,
rawMarkerType = 'o',
summaryMarkerSize = 10,
summaryMarkerType = 'o',
summaryBarColor = 'grey',
meansSummaryLineStyle = 'solid',
contrastZeroLineStyle = 'solid', contrastEffectSizeLineStyle = 'solid',
contrastZeroLineColor = 'black', contrastEffectSizeLineColor = 'black',
pal = None,
legendLoc = 2, legendFontSize = 12, legendMarkerScale = 1,
axis_title_size = None,
yticksize = None,
xticksize = None,
tickAngle=45,
tickAlignment='right',
**kwargs):
# Preliminaries.
data = data.dropna()
# plot params
if axis_title_size is None:
axis_title_size = 15
if yticksize is None:
yticksize = 12
if xticksize is None:
xticksize = 12
axisTitleParams = {'labelsize' : axis_title_size}
xtickParams = {'labelsize' : xticksize}
ytickParams = {'labelsize' : yticksize}
rc('axes', **axisTitleParams)
rc('xtick', **xtickParams)
rc('ytick', **ytickParams)
## If `idx` is not specified, just take the FIRST TWO levels alphabetically.
if idx is None:
        idx = (tuple(np.unique(data[x])[0:2]),)  # wrap as a tuple of tuples for the plot loop below
else:
# check if multi-plot or not
if all(isinstance(element, str) for element in idx):
# if idx is supplied but not a multiplot (ie single list or tuple)
if len(idx) != 2:
print(idx, "does not have length 2.")
sys.exit(0)
else:
idx = (tuple(idx, ),)
        elif all(isinstance(element, tuple) for element in idx):
            # if idx is supplied, and it is a list/tuple of tuples or lists, we have a multiplot!
            for element in idx:
                if len(element) != 2:
                    # Flag any tuple that does not contain exactly 2 elements.
                    print(element, "does not have length 2.")
                    sys.exit(0)
if floatViolinOffset is None:
floatViolinOffset = beforeAfterSpacer/2
if contrastYlim is not None:
contrastYlim = np.array([contrastYlim[0],contrastYlim[1]])
if swarmYlim is not None:
swarmYlim = np.array([swarmYlim[0],swarmYlim[1]])
## Here we define the palette on all the levels of the 'x' column.
## Thus, if the same pandas dataframe is re-used across different plots,
## the color identity of each group will be maintained.
## Set palette based on total number of categories in data['x'] or data['hue_column']
if 'hue' in kwargs:
u = kwargs['hue']
else:
u = x
if ('color' not in kwargs and 'hue' not in kwargs):
kwargs['color'] = 'k'
if pal is None:
pal = dict( zip( data[u].unique(), sns.color_palette(n_colors = len(data[u].unique())) )
)
else:
pal = pal
# Initialise figure.
if figsize is None:
if len(idx) > 2:
figsize = (12,(12/np.sqrt(2)))
else:
figsize = (6,6)
fig = plt.figure(figsize = figsize)
# Initialise GridSpec based on `levs_tuple` shape.
gsMain = gridspec.GridSpec( 1, np.shape(idx)[0]) # 1 row; columns based on number of tuples in tuple.
# Set default statfunction
if statfunction is None:
statfunction = np.mean
# Create list to collect all the contrast DataFrames generated.
contrastList = list()
contrastListNames = list()
for gsIdx, xlevs in enumerate(idx):
## Pivot tempdat to get before and after lines.
data_pivot = data.pivot_table(index = idcol, columns = x, values = y)
# Start plotting!!
if floatContrast is True:
ax_raw = fig.add_subplot(gsMain[gsIdx], frame_on = False)
ax_contrast = ax_raw.twinx()
else:
gsSubGridSpec = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec = gsMain[gsIdx])
ax_raw = plt.Subplot(fig, gsSubGridSpec[0, 0], frame_on = False)
ax_contrast = plt.Subplot(fig, gsSubGridSpec[1, 0], sharex = ax_raw, frame_on = False)
## Plot raw data as swarmplot or stripplot.
if showRawData is True:
swarm_raw = sns.swarmplot(data = data,
x = x, y = y,
order = xlevs,
ax = ax_raw,
palette = pal,
size = rawMarkerSize,
marker = rawMarkerType,
**kwargs)
else:
swarm_raw = sns.stripplot(data = data,
x = x, y = y,
order = xlevs,
ax = ax_raw,
palette = pal,
**kwargs)
swarm_raw.set_ylim(swarmYlim)
## Get some details about the raw data.
maxXBefore = max(swarm_raw.collections[0].get_offsets().T[0])
minXAfter = min(swarm_raw.collections[1].get_offsets().T[0])
if showRawData is True:
#beforeAfterSpacer = (getSwarmSpan(swarm_raw, 0) + getSwarmSpan(swarm_raw, 1))/2
beforeAfterSpacer = 1
xposAfter = maxXBefore + beforeAfterSpacer
xAfterShift = minXAfter - xposAfter
## shift the after swarmpoints closer for aesthetic purposes.
offsetSwarmX(swarm_raw.collections[1], -xAfterShift)
## pandas DataFrame of 'before' group
x1 = pd.DataFrame({str(xlevs[0] + '_x') : pd.Series(swarm_raw.collections[0].get_offsets().T[0]),
xlevs[0] : pd.Series(swarm_raw.collections[0].get_offsets().T[1]),
'_R_' : pd.Series(swarm_raw.collections[0].get_facecolors().T[0]),
'_G_' : pd.Series(swarm_raw.collections[0].get_facecolors().T[1]),
'_B_' : pd.Series(swarm_raw.collections[0].get_facecolors().T[2]),
})
## join the RGB columns into a tuple, then assign to a column.
x1['_hue_'] = x1[['_R_', '_G_', '_B_']].apply(tuple, axis=1)
x1 = x1.sort_values(by = xlevs[0])
x1.index = data_pivot.sort_values(by = xlevs[0]).index
## pandas DataFrame of 'after' group
### create convenient signifiers for column names.
befX = str(xlevs[0] + '_x')
aftX = str(xlevs[1] + '_x')
x2 = pd.DataFrame( {aftX : pd.Series(swarm_raw.collections[1].get_offsets().T[0]),
xlevs[1] : pd.Series(swarm_raw.collections[1].get_offsets().T[1])} )
x2 = x2.sort_values(by = xlevs[1])
x2.index = data_pivot.sort_values(by = xlevs[1]).index
## Join x1 and x2, on both their indexes.
plotPoints = x1.merge(x2, left_index = True, right_index = True, how='outer')
## Add the hue column if hue argument was passed.
if 'hue' in kwargs:
h = kwargs['hue']
plotPoints[h] = data.pivot(index = idcol, columns = x, values = h)[xlevs[0]]
swarm_raw.legend(loc = legendLoc,
fontsize = legendFontSize,
markerscale = legendMarkerScale)
## Plot the lines to join the 'before' points to their respective 'after' points.
if showConnections is True:
for i in plotPoints.index:
                ax_raw.plot([ plotPoints.loc[i, befX],
                              plotPoints.loc[i, aftX] ],
                            [ plotPoints.loc[i, xlevs[0]],
                              plotPoints.loc[i, xlevs[1]] ],
                            linestyle = 'solid',
                            color = plotPoints.loc[i, '_hue_'],
linewidth = 0.75,
alpha = 0.75
)
## Hide the raw swarmplot data if so desired.
if showRawData is False:
swarm_raw.collections[0].set_visible(False)
swarm_raw.collections[1].set_visible(False)
if showRawData is True:
#maxSwarmSpan = max(np.array([getSwarmSpan(swarm_raw, 0), getSwarmSpan(swarm_raw, 1)]))/2
maxSwarmSpan = 0.5
else:
maxSwarmSpan = barWidth
## Plot Summary Bar.
if summaryBar is True:
# Calculate means
means = data.groupby([x], sort = True).mean()[y]
# # Calculate medians
# medians = data.groupby([x], sort = True).median()[y]
## Draw summary bar.
bar_raw = sns.barplot(x = means.index,
y = means.values,
order = xlevs,
ax = ax_raw,
ci = 0,
facecolor = summaryBarColor,
alpha = 0.25)
## Draw zero reference line.
ax_raw.add_artist(Line2D(
(ax_raw.xaxis.get_view_interval()[0],
ax_raw.xaxis.get_view_interval()[1]),
(0,0),
color='black', linewidth=0.75
)
)
## get swarm with largest span, set as max width of each barplot.
for i, bar in enumerate(bar_raw.patches):
x_width = bar.get_x()
width = bar.get_width()
centre = x_width + width/2.
if i == 0:
bar.set_x(centre - maxSwarmSpan/2.)
else:
bar.set_x(centre - xAfterShift - maxSwarmSpan/2.)
bar.set_width(maxSwarmSpan)
# Get y-limits of the treatment swarm points.
beforeRaw = pd.DataFrame( swarm_raw.collections[0].get_offsets() )
afterRaw = pd.DataFrame( swarm_raw.collections[1].get_offsets() )
before_leftx = min(beforeRaw[0])
after_leftx = min(afterRaw[0])
after_rightx = max(afterRaw[0])
after_stat_summary = statfunction(beforeRaw[1])
# Calculate the summary difference and CI.
plotPoints['delta_y'] = plotPoints[xlevs[1]] - plotPoints[xlevs[0]]
plotPoints['delta_x'] = [0] * np.shape(plotPoints)[0]
tempseries = plotPoints['delta_y'].tolist()
test = tempseries.count(tempseries[0]) != len(tempseries)
bootsDelta = bootstrap(plotPoints['delta_y'],
statfunction = statfunction,
smoothboot = smoothboot,
reps = reps)
summDelta = bootsDelta['summary']
lowDelta = bootsDelta['bca_ci_low']
highDelta = bootsDelta['bca_ci_high']
# set new xpos for delta violin.
if floatContrast is True:
if showRawData is False:
xposPlusViolin = deltaSwarmX = after_rightx + floatViolinOffset
else:
xposPlusViolin = deltaSwarmX = after_rightx + maxSwarmSpan
else:
xposPlusViolin = xposAfter
if showRawData is True:
# If showRawData is True and floatContrast is True,
# set violinwidth to the barwidth.
violinWidth = maxSwarmSpan
xmaxPlot = xposPlusViolin + violinWidth
# Plot the summary measure.
ax_contrast.plot(xposPlusViolin, summDelta,
marker = 'o',
markerfacecolor = 'k',
markersize = summaryMarkerSize,
alpha = 0.75
)
# Plot the CI.
ax_contrast.plot([xposPlusViolin, xposPlusViolin],
[lowDelta, highDelta],
color = 'k',
alpha = 0.75,
linestyle = 'solid'
)
# Plot the violin-plot.
v = ax_contrast.violinplot(bootsDelta['stat_array'], [xposPlusViolin],
widths = violinWidth,
showextrema = False,
showmeans = False)
halfviolin(v, half = 'right', color = 'k')
# Remove left axes x-axis title.
ax_raw.set_xlabel("")
# Remove floating axes y-axis title.
ax_contrast.set_ylabel("")
# Set proper x-limits
ax_raw.set_xlim(before_leftx - beforeAfterSpacer/2, xmaxPlot)
ax_raw.get_xaxis().set_view_interval(before_leftx - beforeAfterSpacer/2,
after_rightx + beforeAfterSpacer/2)
ax_contrast.set_xlim(ax_raw.get_xlim())
if floatContrast is True:
# Set the ticks locations for ax_raw.
ax_raw.get_xaxis().set_ticks((0, xposAfter))
# Make sure they have the same y-limits.
ax_contrast.set_ylim(ax_raw.get_ylim())
# Drawing in the x-axis for ax_raw.
## Set the tick labels!
ax_raw.set_xticklabels(xlevs, rotation = tickAngle, horizontalalignment = tickAlignment)
            ## Get lowest y-value for ax_raw (renamed so the `y` column
            ## argument is not shadowed on later loop iterations).
            y_min_raw = ax_raw.get_yaxis().get_view_interval()[0]
# Align the left axes and the floating axes.
align_yaxis(ax_raw, statfunction(plotPoints[xlevs[0]]),
ax_contrast, 0)
# Add label to floating axes. But on ax_raw!
ax_raw.text(x = deltaSwarmX,
y = ax_raw.get_yaxis().get_view_interval()[0],
horizontalalignment = 'left',
s = 'Difference',
fontsize = 15)
# Set reference lines
## zero line
ax_contrast.hlines(0, # y-coordinate
ax_contrast.xaxis.get_majorticklocs()[0], # x-coordinates, start and end.
ax_raw.xaxis.get_view_interval()[1],
linestyle = 'solid',
linewidth = 0.75,
color = 'black')
## effect size line
ax_contrast.hlines(summDelta,
ax_contrast.xaxis.get_majorticklocs()[1],
ax_raw.xaxis.get_view_interval()[1],
linestyle = 'solid',
linewidth = 0.75,
color = 'black')
# Align the left axes and the floating axes.
align_yaxis(ax_raw, after_stat_summary, ax_contrast, 0.)
else:
# Set the ticks locations for ax_raw.
ax_raw.get_xaxis().set_ticks((0, xposAfter))
fig.add_subplot(ax_raw)
fig.add_subplot(ax_contrast)
ax_contrast.set_ylim(contrastYlim)
# Calculate p-values.
# 1-sample t-test to see if the mean of the difference is different from 0.
ttestresult = ttest_1samp(plotPoints['delta_y'], popmean = 0)[1]
bootsDelta['ttest_pval'] = ttestresult
contrastList.append(bootsDelta)
contrastListNames.append( str(xlevs[1])+' v.s. '+str(xlevs[0]) )
# Turn contrastList into a pandas DataFrame,
contrastList = pd.DataFrame(contrastList).T
contrastList.columns = contrastListNames
# Now we iterate thru the contrast axes to normalize all the ylims.
for j,i in enumerate(range(1, len(fig.get_axes()), 2)):
axx=fig.get_axes()[i]
## Get max and min of the dataset.
        lower = np.min(contrastList.loc['stat_array'].iloc[j])
        upper = np.max(contrastList.loc['stat_array'].iloc[j])
import numba
import numpy as np
@numba.jit(nopython=True, cache=True)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
@numba.jit(nopython=True, cache=True)
def hid1_to_vis(h1, w1, vis_b):
return sigmoid(np.dot(w1, h1.astype(np.float32)) + vis_b)
@numba.jit(nopython=True, cache=True)
def hid1_to_hid2(h1, w2, hid_b2):
return sigmoid(np.dot(h1.astype(np.float32), w2) + hid_b2)
@numba.jit(nopython=True, cache=True)
def vis_hid2_to_hid1(v, h2, w1, w2, hid_b1):
return sigmoid(
np.dot(w2, h2.astype(np.float32)) + np.dot(v.astype(np.float32), w1) + hid_b1
)
@numba.jit(nopython=True, cache=True)
def gibbs_sampling_with_mask(
n_samples,
v,
mask,
w1,
w2,
hid_b1,
hid_b2,
vis_b,
n_warmup_samples=5000,
random_h1=False,
):
# Initialize h1
if random_h1:
h1 = np.array([np.random.binomial(1, 0.5) for _ in range(hid_b1.shape[0])])
else:
h1 = gibbs_initialize_h1_from_vh2_with_mask(
v, mask, w1, w2, hid_b1, hid_b2, vis_b
)
for ii in range(n_warmup_samples):
h1 = gibbs_h1_vh2_h1_with_mask(h1, v, mask, w1, w2, hid_b1, hid_b2, vis_b)
means_for_v = np.zeros((n_samples, v.shape[0]))
for ii in range(n_samples):
h1 = gibbs_h1_vh2_h1_with_mask(h1, v, mask, w1, w2, hid_b1, hid_b2, vis_b)
means_for_v[ii] = hid1_to_vis(h1, w1, vis_b)
return means_for_v
@numba.jit(nopython=True, cache=True)
def gibbs_h1_vh2_h1_with_mask(h1, v_true, mask, w1, w2, hid_b1, hid_b2, vis_b):
# h1 to v, h2
v = np.array([np.random.binomial(1, p) for p in hid1_to_vis(h1, w1, vis_b)])
v[mask == 0] = v_true[mask == 0]
h2 = np.array([np.random.binomial(1, p) for p in hid1_to_hid2(h1, w2, hid_b2)])
# v, h2 to h1
h1 = np.array(
        [np.random.binomial(1, p) for p in vis_hid2_to_hid1(v, h2, w1, w2, hid_b1)]
    )
    return h1
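# A minimal usage sketch (illustrative, not from the original module): run a
# few masked Gibbs sweeps on small random toy weights. All shapes and the
# random initialization below are assumptions.
#     rng = np.random.RandomState(0)
#     w1 = rng.randn(6, 4).astype(np.float32) * 0.1   # visible (6) <-> hidden-1 (4)
#     w2 = rng.randn(4, 3).astype(np.float32) * 0.1   # hidden-1 (4) <-> hidden-2 (3)
#     vis_b = np.zeros(6, np.float32); hid_b1 = np.zeros(4, np.float32); hid_b2 = np.zeros(3, np.float32)
#     v = np.array([1, 0, 1, 0, 1, 0]); mask = np.array([0, 0, 0, 1, 1, 1])  # 1 = unobserved, resampled
#     h1 = np.array([np.random.binomial(1, 0.5) for _ in range(4)])
#     for _ in range(200):
#         h1 = gibbs_h1_vh2_h1_with_mask(h1, v, mask, w1, w2, hid_b1, hid_b2, vis_b)
#     print(hid1_to_vis(h1, w1, vis_b))   # mean activations of the visibles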
import numpy as np
from sklearn.metrics.cluster import silhouette_score
from sklearn.cluster import KMeans
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances
def init_population(ind, N, K):
return np.random.randint(0, K, (ind,N))
def get_fitness(data, population):
return np.array([silhouette_score(data, x) for x in population])
def local_kmeans(data: pd.DataFrame, population, iter_max):
new_pop = np.zeros(population.shape)
for ind in range(len(population)):
indexes = [np.where(population[ind] == unq) for unq in np.unique(population[ind])]
centroids = np.array([np.average(data.iloc[x], axis=0) for x in indexes])
kmeans = KMeans(n_clusters=len(centroids), init=centroids, max_iter=iter_max)
kmeans.fit(data)
new_pop[ind] = kmeans.labels_
return new_pop
def roulette_selection(population, fitness, n_select):
total = sum(fitness)
acc_fit = 0
roulette = [-1]
pos = 0
new_population = []
for fit in fitness:
acc_fit += fit
roulette.append(acc_fit/total)
roulette.append(1.1)
for i in range(n_select):
p = np.random.rand()
for j, value in enumerate(roulette):
if p < value:
pos = j
break
new_population.append(population[pos - 1])
return np.array(new_population)
def opDivision(data: pd.DataFrame, individual):
new_ind = individual
labels = np.unique(individual)
k = len(labels)
k = np.random.randint(0, k)
cluster = data.iloc[np.where(individual == k)]
indexes = cluster.index.values
if cluster.shape[0] <= 3:
return individual
centroids = np.array([np.average(cluster.values, axis=0)])
dist = euclidean_distances(centroids, cluster.values)[0]
centroids = np.append(centroids, cluster.values[np.argmax(dist)].reshape(1, -1), axis=0)
d = euclidean_distances(centroids, cluster.values)
labelA = max(labels) + 1
labelB = labelA + 1
for index in range(len(cluster.values)):
if d[0][index] < d[1][index]:
new_ind[indexes[index]] = labelA
else:
new_ind[indexes[index]] = labelB
return new_ind
def opExclusion(data: pd.DataFrame, individual):
labels = np.unique(individual)
if len(labels) <= 2:
return individual
centroids = [np.average(data.iloc[np.where(individual == unq)], axis=0) for unq in labels]
k = len(labels)
k = np.random.randint(0, k)
centroids.pop(k)
dist = euclidean_distances(centroids, data)
return np.argmin(dist.T, axis=1)
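# Example (illustrative, not from the original module): apply the two mutation
# operators to a random labeling of toy 2D data. Note opDivision mutates its
# argument in place, so pass a copy.
#     rng = np.random.RandomState(0)
#     toy = pd.DataFrame(rng.rand(30, 2), columns=['x', 'y'])
#     pop = init_population(ind=4, N=30, K=3)
#     child = opDivision(toy, pop[0].copy())   # split one cluster in two
#     child = opExclusion(toy, child)          # then remove one cluster
#     print(np.unique(child))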
def get_EAC(data: pd.DataFrame):
N = data.shape[0]
n_individuals = 4
n_cluster = 5
itKmeans = 2
itEAC = 1000
window_size = 50
window = np.zeros(window_size)
wi = 0
population = init_population(n_individuals, N, n_cluster)
for it in range(itEAC):
population = local_kmeans(data, population, itKmeans)
fitness = get_fitness(data, population)
best_idx = np.argmax(fitness)
best_fitness = fitness[best_idx]
best_ind = population[best_idx]
population = roulette_selection(population, fitness, n_individuals)
idx = np.arange(n_individuals)
        np.random.shuffle(idx)
import os
import numpy as np
import pytest
import xarray as xr
from xclim import atmos
from xclim.core.calendar import percentile_doy
from xclim.core.options import set_options
from xclim.core.units import convert_units_to
from xclim.testing import open_dataset
K2C = 273.15
class TestCSDI:
def test_simple(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn += K2C
tn[10:20] -= 2
tn = tasmin_series(tn)
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert out[0] == 10
def test_convert_units(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn[10:20] -= 2
tn = tasmin_series(tn + K2C)
tn.attrs["units"] = "C"
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert out[0] == 10
def test_nan_presence(self, tasmin_series):
i = 3650
A = 10.0
tn = (
np.zeros(i)
+ K2C
+ A * np.sin(np.arange(i) / 365.0 * 2 * np.pi)
+ 0.1 * np.random.rand(i)
)
tn[10:20] -= 2
tn[9] = np.nan
tn = tasmin_series(tn)
tn10 = percentile_doy(tn, per=10).sel(percentiles=10)
out = atmos.cold_spell_duration_index(tn, tn10, freq="AS-JUL")
assert np.isnan(out[0])
class TestDTR:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_DTR_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
dtr = atmos.daily_temperature_range(tasmin, tasmax, freq="MS")
dtrC = atmos.daily_temperature_range(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
dtr1 = max1 - min1
np.testing.assert_array_equal(dtr, dtrC)
assert dtr.attrs["units"] == "K"
assert np.allclose(dtr1[0:31].mean(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
dtr = atmos.max_daily_temperature_range(tasmin, tasmax, freq="MS")
dtrC = atmos.max_daily_temperature_range(tasmin_C, tasmax_C, freq="MS")
np.testing.assert_array_equal(dtr, dtrC)
assert dtr.attrs["units"] == "K"
assert np.allclose(dtr1[0:31].max(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
class TestDTRVar:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_dtr_var_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
dtr = atmos.daily_temperature_range_variability(tasmin, tasmax, freq="MS")
dtrC = atmos.daily_temperature_range_variability(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
assert dtr.attrs["units"] == "K"
dtr1a = max1 - min1
dtr1 = abs(np.diff(dtr1a))
np.testing.assert_array_equal(dtr, dtrC)
        # first month (Jan) uses indices 0:30 (n == 30) because of the day-to-day diff
assert np.allclose(dtr1[0:30].mean(), dtr.values[0, 0, 0])
assert np.isnan(dtr.values[1, 1, 0])
assert np.isnan(dtr.values[0, -1, -1])
class TestETR:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_dtr_var_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmax_C = open_dataset(self.nc_tasmax).tasmax
tasmax_C -= K2C
tasmax_C.attrs["units"] = "C"
tasmin = open_dataset(self.nc_tasmin).tasmin
tasmin_C = open_dataset(self.nc_tasmin).tasmin
tasmin_C -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[32, 1, 0] = np.nan
tasmin_C.values[32, 1, 0] = np.nan
etr = atmos.extreme_temperature_range(tasmin, tasmax, freq="MS")
etrC = atmos.extreme_temperature_range(tasmin_C, tasmax_C, freq="MS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
np.testing.assert_array_equal(etr, etrC)
etr1 = max1[0:31].max() - min1[0:31].min()
assert np.allclose(etr1, etr.values[0, 0, 0])
assert np.isnan(etr.values[1, 1, 0])
assert np.isnan(etr.values[0, -1, -1])
class TestTmean:
nc_files = (
os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc"),
os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc"),
)
def test_Tmean_3d_data(self):
ds_tmax = open_dataset(self.nc_files[0])
ds_tmin = open_dataset(self.nc_files[1])
tas = atmos.tg(ds_tmin.tasmin, ds_tmax.tasmax)
tas_C = atmos.tg(ds_tmin.tasmin, ds_tmax.tasmax)
tas_C.values -= K2C
tas_C.attrs["units"] = "C"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas_C.values[180, 1, 0] = np.nan
tmmean = atmos.tg_mean(tas)
tmmeanC = atmos.tg_mean(tas_C)
x1 = tas.values[:, 0, 0]
tmmean1 = x1.mean()
# TODO: Investigate the differences between the two outputs.
# The conversion to K is done after / before the mean.
np.testing.assert_array_almost_equal(tmmeanC, tmmean, 3)
# test single point vs manual
assert np.allclose(tmmean1, tmmean.values[0, 0, 0], tmmeanC.values[0, 0, 0])
# test single nan point
assert np.isnan(tmmean.values[0, 1, 0])
# test all nan point
assert np.isnan(tmmean.values[0, -1, -1])
class TestTx:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_TX_3d_data(self):
tasmax = open_dataset(self.nc_file).tasmax
tasmax_C = open_dataset(self.nc_file).tasmax
tasmax_C.values -= K2C
tasmax_C.attrs["units"] = "C"
# put a nan somewhere
tasmax.values[180, 1, 0] = np.nan
tasmax_C.values[180, 1, 0] = np.nan
txmean = atmos.tx_mean(tasmax)
txmax = atmos.tx_max(tasmax)
txmin = atmos.tx_min(tasmax)
txmeanC = atmos.tx_mean(tasmax_C)
txmaxC = atmos.tx_max(tasmax_C)
txminC = atmos.tx_min(tasmax_C)
no_nan = (
~np.isnan(txmean).values & ~np.isnan(txmax).values & ~np.isnan(txmin).values
)
# test maxes always greater than mean and mean always greater than min (non nan values only)
assert np.all(txmax.values[no_nan] > txmean.values[no_nan]) & np.all(
txmean.values[no_nan] > txmin.values[no_nan]
)
np.testing.assert_array_almost_equal(txmeanC, txmean, 3)
np.testing.assert_array_equal(txminC, txmin)
np.testing.assert_array_equal(txmaxC, txmax)
x1 = tasmax.values[:, 0, 0]
txmean1 = x1.mean()
txmin1 = x1.min()
txmax1 = x1.max()
# test single point vs manual
assert np.allclose(txmean1, txmean.values[0, 0, 0], txmeanC.values[0, 0, 0])
assert np.allclose(txmax1, txmax.values[0, 0, 0], txmaxC.values[0, 0, 0])
assert np.allclose(txmin1, txmin.values[0, 0, 0], txminC.values[0, 0, 0])
# test single nan point
assert np.isnan(txmean.values[0, 1, 0])
assert np.isnan(txmin.values[0, 1, 0])
assert np.isnan(txmax.values[0, 1, 0])
# test all nan point
assert np.isnan(txmean.values[0, -1, -1])
assert np.isnan(txmin.values[0, -1, -1])
assert np.isnan(txmax.values[0, -1, -1])
class TestTn:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_TN_3d_data(self):
tasmin = open_dataset(self.nc_file).tasmin
tasmin_C = open_dataset(self.nc_file).tasmin
tasmin_C.values -= K2C
tasmin_C.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[180, 1, 0] = np.nan
tasmin_C.values[180, 1, 0] = np.nan
tnmean = atmos.tn_mean(tasmin)
tnmax = atmos.tn_max(tasmin)
tnmin = atmos.tn_min(tasmin)
tnmeanC = atmos.tn_mean(tasmin_C)
tnmaxC = atmos.tn_max(tasmin_C)
tnminC = atmos.tn_min(tasmin_C)
no_nan = (
~np.isnan(tnmean).values & ~np.isnan(tnmax).values & ~np.isnan(tnmin).values
)
        # test maxes always greater than mean and mean always greater than min (non nan values only)
        assert np.all(tnmax.values[no_nan] > tnmean.values[no_nan]) & np.all(
tnmean.values[no_nan] > tnmin.values[no_nan]
)
np.testing.assert_array_almost_equal(tnmeanC, tnmean, 3)
np.testing.assert_array_equal(tnminC, tnmin)
np.testing.assert_array_equal(tnmaxC, tnmax)
x1 = tasmin.values[:, 0, 0]
txmean1 = x1.mean()
txmin1 = x1.min()
txmax1 = x1.max()
# test single point vs manual
assert np.allclose(txmean1, tnmean.values[0, 0, 0], tnmeanC.values[0, 0, 0])
assert np.allclose(txmax1, tnmax.values[0, 0, 0], tnmaxC.values[0, 0, 0])
assert np.allclose(txmin1, tnmin.values[0, 0, 0], tnminC.values[0, 0, 0])
# test single nan point
assert np.isnan(tnmean.values[0, 1, 0])
assert np.isnan(tnmin.values[0, 1, 0])
assert np.isnan(tnmax.values[0, 1, 0])
# test all nan point
assert np.isnan(tnmean.values[0, -1, -1])
assert np.isnan(tnmin.values[0, -1, -1])
assert np.isnan(tnmax.values[0, -1, -1])
class TestConsecutiveFrostDays:
def test_one_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [1])
def test_three_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [3])
def test_two_equal_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
a[6:9] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [3])
def test_two_events_freeze_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2:5] -= 20
a[6:10] -= 20
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [4])
def test_convert_units_freeze_day(self, tasmin_series):
a = np.zeros(365) + 5.0
a[2:5] -= 20
a[6:10] -= 20
ts = tasmin_series(a)
ts.attrs["units"] = "C"
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [4])
def test_one_nan_day(self, tasmin_series):
a = np.zeros(365) + K2C + 5.0
a[2] -= 20
a[-1] = np.nan
ts = tasmin_series(a)
out = atmos.consecutive_frost_days(ts)
np.testing.assert_array_equal(out, [np.nan])
class TestConsecutiveFrostFreeDays:
def test_real_data(self, atmosds):
tasmin = atmosds.tasmin
test = atmos.maximum_consecutive_frost_free_days(tasmin)
np.testing.assert_allclose(test[2, 0], [68], rtol=1e-1)
assert (
"Annual maximum number of consecutive days with minimum daily temperature above or equal to 0 degc."
) in test.description
class TestFrostSeasonLength:
def test_simple(self, tasmin_series):
a = np.zeros(730) + K2C + 15
a[300:400] = K2C - 5
a[404:407] = K2C - 5
tasmin = tasmin_series(a, start="2000-01-01")
# Default, window = 5, mid_date = 07-01, freq= AS-JUL
out = atmos.frost_season_length(tasmin=tasmin)
np.testing.assert_array_equal(out, [np.nan, 107, np.nan])
out = atmos.frost_season_length(tasmin=tasmin, window=3)
np.testing.assert_array_equal(out, [np.nan, 100, np.nan])
out = atmos.frost_season_length(tasmin=tasmin, mid_date="07-01", freq="YS")
np.testing.assert_array_equal(out, [np.nan, np.nan])
class TestColdSpellDays:
def test_simple(self, tas_series):
a = np.zeros(365) + K2C
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
ts = tas_series(a)
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_convert_units(self, tas_series):
a = np.zeros(365)
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
ts = tas_series(a)
ts.attrs["units"] = "C"
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_nan_presence(self, tas_series):
a = np.zeros(365) + K2C
a[10:20] -= 15 # 10 days
a[40:43] -= 50 # too short -> 0
a[80:100] -= 30 # at the end and beginning
a[-1] = np.nan
ts = tas_series(a)
out = atmos.cold_spell_days(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [10, 0, 12, 8, 0, 0, 0, 0, 0, 0, 0, np.nan])
out = atmos.cold_spell_frequency(ts, thresh="-10 C", freq="MS")
np.testing.assert_array_equal(out, [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, np.nan])
class TestFrostDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tasmin = open_dataset(self.nc_file).tasmin
tasminC = open_dataset(self.nc_file).tasmin
tasminC -= K2C
tasminC.attrs["units"] = "C"
# put a nan somewhere
tasmin.values[180, 1, 0] = np.nan
tasminC.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 273.16
fd = atmos.frost_days(tasmin, freq="YS")
fdC = atmos.frost_days(tasminC, freq="YS")
# fds = xci.frost_days(tasmin, thresh=thresh, freq='YS', skipna=True)
x1 = tasmin.values[:, 0, 0]
fd1 = (x1[x1 < thresh]).size
np.testing.assert_array_equal(fd, fdC)
assert np.allclose(fd1, fd.values[0, 0, 0])
# assert (np.allclose(fd1, fds.values[0, 0, 0]))
assert np.isnan(fd.values[0, 1, 0])
# assert (np.allclose(fd2, fds.values[0, 1, 0]))
assert np.isnan(fd.values[0, -1, -1])
# assert (np.isnan(fds.values[0, -1, -1]))
class TestIceDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tasC = open_dataset(self.nc_file).tasmax
tasC -= K2C
tasC.attrs["units"] = "C"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tasC.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 273.16
fd = atmos.ice_days(tas, freq="YS")
fdC = atmos.ice_days(tasC, freq="YS")
x1 = tas.values[:, 0, 0]
fd1 = (x1[x1 < thresh]).size
np.testing.assert_array_equal(fd, fdC)
assert np.allclose(fd1, fd.values[0, 0, 0])
assert np.isnan(fd.values[0, 1, 0])
assert np.isnan(fd.values[0, -1, -1])
class TestCoolingDegreeDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tas.attrs["cell_methods"] = "time: mean within days"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 18 + K2C
cdd = atmos.cooling_degree_days(tas, thresh="18 C", freq="YS")
x1 = tas.values[:, 0, 0]
cdd1 = (x1[x1 > thresh] - thresh).sum()
assert np.allclose(cdd1, cdd.values[0, 0, 0])
assert np.isnan(cdd.values[0, 1, 0])
assert np.isnan(cdd.values[0, -1, -1])
def test_convert_units(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tas.values -= K2C
tas.attrs["units"] = "C"
tas.attrs["cell_methods"] = "time: mean within days"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = 18
cdd = atmos.cooling_degree_days(tas, thresh="18 C", freq="YS")
x1 = tas.values[:, 0, 0]
# x2 = tas.values[:, 1, 0]
cdd1 = (x1[x1 > thresh] - thresh).sum()
# gdd2 = (x2[x2 > thresh] - thresh).sum()
assert np.allclose(cdd1, cdd.values[0, 0, 0])
# assert (np.allclose(gdd1, gdds.values[0, 0, 0]))
assert np.isnan(cdd.values[0, 1, 0])
# assert (np.allclose(gdd2, gdds.values[0, 1, 0]))
assert np.isnan(cdd.values[0, -1, -1])
# assert (np.isnan(gdds.values[0, -1, -1]))
class TestHeatingDegreeDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas.attrs["cell_methods"] = "time: mean within days"
# compute with both skipna options
thresh = 17 + K2C
hdd = atmos.heating_degree_days(tas, freq="YS")
x1 = tas.values[:, 0, 0]
hdd1 = (thresh - x1).clip(min=0).sum()
assert np.allclose(hdd1, hdd.values[0, 0, 0])
assert np.isnan(hdd.values[0, 1, 0])
assert np.isnan(hdd.values[0, -1, -1])
def test_convert_units(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
tas.values -= K2C
tas.attrs["units"] = "C"
tas.attrs["cell_methods"] = "time: mean within days"
# compute with both skipna options
thresh = 17
hdd = atmos.heating_degree_days(tas, freq="YS")
x1 = tas.values[:, 0, 0]
hdd1 = (thresh - x1).clip(min=0).sum()
assert np.allclose(hdd1, hdd.values[0, 0, 0])
assert np.isnan(hdd.values[0, 1, 0])
assert np.isnan(hdd.values[0, -1, -1])
class TestGrowingDegreeDays:
nc_file = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
def test_3d_data_with_nans(self):
# test with 3d data
tas = open_dataset(self.nc_file).tasmax
tas.attrs["cell_methods"] = "time: mean within days"
# put a nan somewhere
tas.values[180, 1, 0] = np.nan
# compute with both skipna options
thresh = K2C + 4
gdd = atmos.growing_degree_days(tas, freq="YS")
# gdds = xci.growing_degree_days(tas, thresh=thresh, freq='YS', skipna=True)
x1 = tas.values[:, 0, 0]
# x2 = tas.values[:, 1, 0]
gdd1 = (x1[x1 > thresh] - thresh).sum()
# gdd2 = (x2[x2 > thresh] - thresh).sum()
assert np.allclose(gdd1, gdd.values[0, 0, 0])
assert np.isnan(gdd.values[0, 1, 0])
assert np.isnan(gdd.values[0, -1, -1])
class TestHeatWaveFrequency:
def test_1d(self, tasmax_series, tasmin_series):
tn1 = np.zeros(366)
tx1 = np.zeros(366)
tn1[:10] = np.array([20, 23, 23, 23, 23, 21, 23, 23, 23, 23])
tx1[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tn = tasmin_series(tn1 + K2C, start="1/1/2000")
tx = tasmax_series(tx1 + K2C, start="1/1/2000")
tnC = tasmin_series(tn1, start="1/1/2000")
tnC.attrs["units"] = "C"
txC = tasmax_series(tx1, start="1/1/2000")
txC.attrs["units"] = "C"
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
hwfC = atmos.heat_wave_frequency(
tnC, txC, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
np.testing.assert_array_equal(hwf, hwfC)
np.testing.assert_allclose(hwf.values[:1], 2)
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", window=4, freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 1)
# one long hw
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="10 C", thresh_tasmax="10 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 1)
# no hw
hwf = atmos.heat_wave_frequency(
tn, tx, thresh_tasmin="40 C", thresh_tasmax="40 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 0)
class TestHeatWaveMaxLength:
def test_1d(self, tasmax_series, tasmin_series):
tn1 = np.zeros(366)
tx1 = np.zeros(366)
tn1[:10] = np.array([20, 23, 23, 23, 23, 21, 23, 23, 23, 23])
tx1[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tn = tasmin_series(tn1 + K2C, start="1/1/2000")
tx = tasmax_series(tx1 + K2C, start="1/1/2000")
tnC = tasmin_series(tn1, start="1/1/2000")
tnC.attrs["units"] = "C"
txC = tasmax_series(tx1, start="1/1/2000")
txC.attrs["units"] = "C"
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
hwfC = atmos.heat_wave_max_length(
tnC, txC, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
np.testing.assert_array_equal(hwf, hwfC)
np.testing.assert_allclose(hwf.values[:1], 4)
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="20 C", thresh_tasmax="30 C", window=4, freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 5)
# one long hw
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="10 C", thresh_tasmax="10 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 10)
# no hw
hwf = atmos.heat_wave_max_length(
tn, tx, thresh_tasmin="40 C", thresh_tasmax="40 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 0)
class TestHeatWaveTotalLength:
def test_1d(self, tasmax_series, tasmin_series):
tn1 = np.zeros(366)
tx1 = np.zeros(366)
tn1[:10] = np.array([20, 23, 23, 23, 23, 21, 23, 23, 23, 23])
tx1[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tn = tasmin_series(tn1 + K2C, start="1/1/2000")
tx = tasmax_series(tx1 + K2C, start="1/1/2000")
tnC = tasmin_series(tn1, start="1/1/2000")
tnC.attrs["units"] = "C"
txC = tasmax_series(tx1, start="1/1/2000")
txC.attrs["units"] = "C"
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
hwfC = atmos.heat_wave_total_length(
tnC, txC, thresh_tasmin="22 C", thresh_tasmax="30 C", freq="YS"
)
np.testing.assert_array_equal(hwf, hwfC)
np.testing.assert_allclose(hwf.values[:1], 7)
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="20 C", thresh_tasmax="30 C", window=4, freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 5)
# one long hw
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="10 C", thresh_tasmax="10 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 10)
# no hw
hwf = atmos.heat_wave_total_length(
tn, tx, thresh_tasmin="40 C", thresh_tasmax="40 C", freq="YS"
)
np.testing.assert_allclose(hwf.values[:1], 0)
class TestHeatWaveIndex:
def test_simple(self, tasmax_series):
tx = np.zeros(366)
tx[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tx = tasmax_series(tx + K2C, start="1/1/2000")
hwi = atmos.heat_wave_index(tx, freq="YS")
np.testing.assert_array_equal(hwi, [10])
def test_convert_units(self, tasmax_series):
tx = np.zeros(366)
tx[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tx = tasmax_series(tx, start="1/1/2000")
tx.attrs["units"] = "C"
hwi = atmos.heat_wave_index(tx, freq="YS")
np.testing.assert_array_equal(hwi, [10])
def test_nan_presence(self, tasmax_series):
tx = np.zeros(366)
tx[:10] = np.array([29, 31, 31, 31, 29, 31, 31, 31, 31, 31])
tx[-1] = np.nan
tx = tasmax_series(tx + K2C, start="1/1/2000")
hwi = atmos.heat_wave_index(tx, freq="YS")
np.testing.assert_array_equal(hwi, [np.nan])
class TestDailyFreezeThaw:
nc_tasmax = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmax_1990.nc")
nc_tasmin = os.path.join("NRCANdaily", "nrcan_canada_daily_tasmin_1990.nc")
def test_3d_data_with_nans(self):
tasmax = open_dataset(self.nc_tasmax).tasmax
tasmin = open_dataset(self.nc_tasmin).tasmin
# put a nan somewhere
tasmin.values[180, 1, 0] = np.nan
frzthw = atmos.daily_freezethaw_cycles(tasmin, tasmax, freq="YS")
min1 = tasmin.values[:, 0, 0]
max1 = tasmax.values[:, 0, 0]
frzthw1 = ((min1 < K2C) * (max1 > K2C) * 1.0).sum()
        np.testing.assert_allclose(frzthw1, frzthw.values[0, 0, 0])
# -----------------------------------------------------------------------------
# File: face_preprocess.py
"""This code is take from
git checkout 07f6547
src/common/face_preprocess.py
I have added some comments and docstrings to this code (which are
completely missing in the original).
"""
from typing import Tuple
import cv2
from skimage import transform as trans
import numpy as np
# toolbox imports
from dltb.base.image import Image, Imagelike, Sizelike
class FaceAligner:
@staticmethod
def parse_lst_line(line):
"""Parse alignment information from a text line (tabulator separated
values). The line should contain either 3, 7 or 17 values.
The first 3 values specify (aligned, image_path, label),
the (numeriic) aligned flag, the path to the image file
and a numerical label. This can be followed in fields 3 to 6 by
four integer coordinates for the bouding boxes, and then in
fields 7-16 by ten coordinates for the facial landmarks.
Arguments
---------
line:
Result
------
image_path: str
label: int
bbox: np.ndarray of shape (4,), dtype np.int32
landmark: np.ndarray of shape (2, 5), dtype np.float32
aligned: int
"""
vec = line.strip().split("\t")
assert len(vec) >= 3
aligned = int(vec[0])
image_path = vec[1]
label = int(vec[2])
bbox = None
landmark = None
# print(vec)
if len(vec) > 3:
bbox = np.zeros((4,), dtype=np.int32)
for i in range(3, 7):
bbox[i-3] = int(vec[i])
landmark = None
# optional: coordinates for 5 landmarks
if len(vec) > 7:
coordinates = []
for i in range(7, 17):
coordinates.append(float(vec[i]))
landmark = np.array(coordinates).reshape((2, 5)).T
return image_path, label, bbox, landmark, aligned
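    # Illustrative example (hypothetical data, not from the original code):
    # a 7-field line carries the aligned flag, image path, label and a bbox:
    #   line = "0\timgs/0001.jpg\t42\t10\t20\t110\t120"
    #   path, label, bbox, landmark, aligned = FaceAligner.parse_lst_line(line)
    #   # -> bbox == array([ 10,  20, 110, 120]), landmark is None, aligned == 0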
@staticmethod
def read_image(img_path, **kwargs):
"""Read an image from a file
"""
mode = kwargs.get('mode', 'rgb')
layout = kwargs.get('layout', 'HWC')
        # (the old cv2.CV_LOAD_IMAGE_* constants were removed in OpenCV 3)
        if mode == 'gray':
            img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        else:
            img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if mode == 'rgb':
# print('to rgb')
img = img[..., ::-1] # BGR -> RGB
if layout == 'CHW':
img = np.transpose(img, (2, 0, 1))
return img
def preprocess(self, image: Imagelike, size: Tuple[int, int] = None,
bbox=None, landmark=None,
margin: int = 0, **kwargs): # margin=44
"""Preprocess the image. Preprocessing consists of multiple steps:
1. read the image
2. obtain the target image size
3. align the image
Arguments
---------
image:
The image to be preprocessed.
size:
The target size of the image after preprocessing.
        bbox:
            The bounding box for the image.
        landmark:
Facial landmarks for face alignment.
margin:
Extra margin to put around the face.
"""
#
# 1. read the image
#
img = Image.as_array(image)
#
# 2. obtain the target image size
#
# str_image_size = image_size
# image_size = [] # image_size as two-element list [width, height]
# if str_image_size:
# image_size = [int(x) for x in str_image_size.split(',')]
# if len(image_size) == 1:
# image_size = [image_size[0], image_size[0]]
if size is None:
image_size = (112, 112)
else:
image_size = size
assert len(image_size) == 2
        assert image_size[0] == 112
        assert image_size[1] == 112 or image_size[1] == 96
#
# 3. align the image
#
# obtain a transformation matrix
        transformation = (None if landmark is None else
                          self._transformation_matrix(landmark, image_size))
# if no transformation was obtained, just resize
if transformation is None:
return self._resize_image(img, image_size, margin=margin)
        # otherwise apply the transformation
return self._transform_image(img, transformation, image_size)
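    # Illustrative call (the variable names here are made up):
    #   aligner = FaceAligner()
    #   face = aligner.preprocess(raw_img, size=(112, 112),
    #                             landmark=five_point_landmarks)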
@staticmethod
def _transformation_matrix(landmarks, size):
"""
size:
The size of the target image. Only two sizes are supported:
(112, 1112) or (112, 96).
"""
src = np.array([
[30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041]], dtype=np.float32)
if size[1] == 112:
src[:, 0] += 8.0
dst = landmarks.astype(np.float32)
# src = src[0:3,:]
# dst = dst[0:3,:]
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
trans_matrix = tform.params[0:2, :]
# trans_matrix = \
# cv2.estimateRigidTransform(dst.reshape(1,5,2),
# src.reshape(1,5,2), False)
# print(src.shape, dst.shape)
# print(src)
# print(dst)
# print(trans_matrix)
return trans_matrix
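    # The 2x3 matrix returned above is consumed by self._transform_image,
    # which is not part of this excerpt. A minimal sketch of what that step
    # presumably does, assuming cv2.warpAffine semantics:
    #   warped = cv2.warpAffine(img, trans_matrix,
    #                           (image_size[1], image_size[0]),
    #                           borderValue=0.0)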
@staticmethod
def _resize_image(image: np.ndarray, image_size, bbox=None,
margin: int = 44):
"""
Arguments
---------
image:
The image to be resized.
        image_size:
            The new size of the image.
        bbox:
            BoundingBox (x1, y1, x2, y2) to cut out of the input image.
            Only that part of the input image will be resized.
            If the (extended - see margin) bounding box reaches out
            of the image boundaries, it will be adapted so that it is
            inside the image (this may distort the aspect ratio).
        margin:
            An extra margin by which the bounding box is extended
            on all sides (half of the margin is added left/right and
            top/bottom).
"""
# no transformation: use bounding box for resizing
if bbox is None: # use center crop
det = np.zeros(4, dtype=np.int32)
det[0] = int(image.shape[1]*0.0625)
det[1] = int(image.shape[0]*0.0625)
det[2] = image.shape[1] - det[0]
det[3] = image.shape[0] - det[1]
else:
det = bbox
        bbox = np.zeros(4, dtype=np.int32)
#!/usr/bin/env python
"""
Testing healpix module
"""
__author__ = "<NAME>"
import unittest
import numpy as np
import healpy as hp
import fitsio
from ugali.utils import healpix
from ugali.utils.logger import logger
logger.setLevel(logger.WARN)
NSIDE = 4096
FACTOR = 4
PIX = np.array([104582830, 43361203, 142027178])
U_GRADE_PIX_NEST = np.array([[418331320, 418331321, 418331322, 418331323],
[173444812, 173444813, 173444814, 173444815],
[568108712, 568108713, 568108714, 568108715]])
U_GRADE_PIX_RING = np.array([[418356572, 418323804, 418323803, 418291036],
[173492070, 173459302, 173459301, 173426534],
[568152916, 568120148, 568120147, 568087380]])
D_GRADE_PIX_NEST = PIX//FACTOR
D_GRADE_PIX_RING = np.array([26142551, 10842585, 35509461])
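# In NESTED ordering, up-grading nside by a factor of 2 maps pixel p to its
# four children 4*p .. 4*p + 3 (hence U_GRADE_PIX_NEST == 4*PIX + [0..3]),
# and down-grading maps p to its parent p // 4; the *_RING arrays hold the
# same pixels converted to RING indices.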
import numpy as np
import pandas as pd
import unittest
from collections import OrderedDict
from kabuki.generate import gen_rand_data, _add_noise
def gen_func_df(size=100, loc=0, scale=1):
data = np.random.normal(loc=loc, scale=scale, size=size)
return pd.DataFrame(data, columns=['data'])
class TestGenerate(unittest.TestCase):
def runTest(self):
pass
def test_add_noise(self):
noise = 1
params = OrderedDict([('loc', 0), ('scale', 1)])
np.random.seed(31337)
new_params = _add_noise({'test': params}, noise=noise)['test']
# check if copied
self.assertFalse(new_params is params)
# check if noise correctly added
np.random.seed(31337)
self.assertTrue(new_params['loc'] == np.random.normal(loc=params['loc'], scale=noise))
self.assertTrue(new_params['scale'] == np.random.normal(loc=params['scale'], scale=noise))
# test whether exclude works
new_params = _add_noise({'test': params}, noise=noise, exclude_params=('scale',))['test']
self.assertTrue(new_params['scale'] == 1)
# test whether bounds work
for i in range(10):
bound_params = _add_noise({'test': params}, bounds={'loc': (-1, 1), 'scale': (0, 2)}, noise=3)['test']
assert (bound_params['loc'] > -1) and (bound_params['loc'] < 1)
assert (bound_params['scale'] > 0) and (bound_params['scale'] < 2)
# test whether valid_func works
check_valid_func = lambda **params: (params['loc'] > -1) and (params['loc'] < 1) and (params['scale'] > 0) and (
params['scale'] < 2)
for i in range(10):
bound_params = _add_noise({'test': params}, check_valid_func=check_valid_func, noise=3)['test']
assert (bound_params['loc'] > -1) and (bound_params['loc'] < 1)
assert (bound_params['scale'] > 0) and (bound_params['scale'] < 2)
def test_single_cond_no_subj(self):
params = {'loc': 0, 'scale': 1}
seed = 31337
data, params_return = gen_rand_data(gen_func_df, params, size=100, seed=seed)
        np.random.seed(seed)
"""
Demo of HMR.
Note that HMR requires the bounding box of the person in the image. The best performance is obtained when max length of the person in the image is roughly 150px.
When only the image path is supplied, it assumes that the image is centered on a person whose length is roughly 150px.
Alternatively, you can supply output of the openpose to figure out the bbox and the right scale factor.
Sample usage:
# On images on a tightly cropped image around the person
python -m demo --img_path data/im1963.jpg
python -m demo --img_path data/coco1.png
# On images, with openpose output
python -m demo --img_path data/random.jpg --json_path data/random_keypoints.json
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
""" added by CCJ:
> see problem : _tkinter.TclError: no display name and no $DISPLAY environment variable, at https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
You can solve it by adding these two lines at the VERY beginning of your *.py script.
They should come at the very beginning of the code. This is important.
"""
from os.path import join, exists
import matplotlib
matplotlib.use('Agg')
import sys
from absl import flags
import numpy as np
import skimage.io as io
import tensorflow as tf
from src.util import renderer as vis_util
from src.util import image as img_util
from src.util import openpose as op_util
from src.load_data_4_inference import data_loader_for_inference as load
from src.load_data_4_inference import extract_14_joints
from src.util import surreal_in_extrinc as surreal_util
from src.RunModelDepth import RunModelV2
from src.RunModel import RunModel as RunModelV1
#added by ccj;
import src.pfmutil as pfm
import cv2
import os
from src.config import get_config
from src.pose_perceptron import (get_one_batched_cad_toy_example,
draw_lsp_skeleton, save_to_mesh_file)
from src.benchmark.eval_util import align_by_pelvis,compute_error_one_sample
from datetime import datetime
import deepdish as dd
#flags.DEFINE_string('depth_fname', 'data/cad-60-small/dep-scale-RGB_20.pfm', 'depth image to run')
#flags.DEFINE_string('image_fname', 'data/cad-60-small/img-scale-RGB_20.pfm', 'depth image to run')
#flags.DEFINE_integer('gender', 2, 'femael :0, male : 1, neutral = 2')
#flags.DEFINE_string('info_fname', 'data/im1963.jpg', 'info file run')
#flags.DEFINE_string('result_dir', 'data/im1963.jpg', 'results dir to save files')
#flags.DEFINE_integer('t_beg', 0, 'frame begin idx')
#flags.DEFINE_integer('t_end', 1, 'frame end idx')
#flags.DEFINE_string('json_path', None, 'If specified, uses the openpose output to crop the image.')
def visualize(img, proc_param, joints, verts, cam, save_fig_result):
"""
Renders the result in original image coordinate frame.
"""
cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
proc_param, verts, cam, joints, img_size=img.shape[:2])
# Render results
skel_img = vis_util.draw_skeleton(img, joints_orig)
rend_img_overlay = renderer(
vert_shifted, cam=cam_for_render, img=img, do_alpha=True)
rend_img = renderer(
vert_shifted, cam=cam_for_render, img_size=img.shape[:2])
rend_img_vp1 = renderer.rotated(
vert_shifted, 60, cam=cam_for_render, img_size=img.shape[:2])
rend_img_vp2 = renderer.rotated(
vert_shifted, -60, cam=cam_for_render, img_size=img.shape[:2])
import matplotlib.pyplot as plt
# plt.ion()
plt.figure(1)
plt.clf()
plt.subplot(231)
plt.imshow(img)
plt.title('input')
plt.axis('off')
plt.subplot(232)
plt.imshow(skel_img)
plt.title('joint projection')
plt.axis('off')
plt.subplot(233)
plt.imshow(rend_img_overlay)
plt.title('3D Mesh overlay')
plt.axis('off')
plt.subplot(234)
plt.imshow(rend_img)
plt.title('3D mesh')
plt.axis('off')
plt.subplot(235)
plt.imshow(rend_img_vp1)
plt.title('diff vp')
plt.axis('off')
plt.subplot(236)
plt.imshow(rend_img_vp2)
plt.title('diff vp')
plt.axis('off')
plt.draw()
"""
> see https://hub.docker.com/r/dawars/hmr/
Matplotlib cannot open a window in docker (by default),
    therefore it needs to be replaced by saving the figures
instead: In the demo.py change plt.show() to plt.savefig("figure.png")
"""
# added by CCJ;
dockerEnv = True
if not dockerEnv:
plt.show()
else:
plt.savefig(save_fig_result)
print ("saved %s ..." % save_fig_result)
# import ipdb
# ipdb.set_trace()
return cam_for_render, joints_orig
def visualize_joints2d_3kinds(img, joints1, joints2, joints3, save_fig_result):
"""
Renders the result in original image coordinate frame.
"""
# Render results
skel_img1 = vis_util.draw_skeleton(img, joints1)
skel_img2 = vis_util.draw_skeleton(img, joints2)
skel_img3 = vis_util.draw_skeleton(img, joints3)
import matplotlib.pyplot as plt
# plt.ion()
plt.figure(1)
plt.clf()
plt.subplot(311)
plt.imshow(skel_img1)
plt.title('joints2d_gt')
plt.axis('off')
plt.subplot(312)
plt.title('joints 3d smpl ext/intric projection')
plt.imshow(skel_img2)
plt.axis('off')
plt.subplot(313)
plt.imshow(skel_img3)
plt.title('joints 3d tf smpl ext/intric projection')
plt.axis('off')
plt.draw()
"""
> see https://hub.docker.com/r/dawars/hmr/
Matplotlib cannot open a window in docker (by default),
    therefore it needs to be replaced by saving the figures
instead: In the demo.py change plt.show() to plt.savefig("figure.png")
"""
# added by CCJ;
dockerEnv = True
if not dockerEnv:
plt.show()
else:
plt.savefig(save_fig_result)
print ("saved %s ..." % save_fig_result)
# import ipdb
# ipdb.set_trace()
def preprocess_image(img, depth, json_path=None, joints2d_gt=None, cam_gt=None):
#img = io.imread(img_path)
#if img.shape[2] == 4:
# img = img[:, :, :3]
#if depth_path is not None:
# if ".pfm" in depth_path:
# dep = pfm.load_pfm(depth_path)
# else:
# dep = io.imread(depth_path)
#else:
# dep = np.zeros(img.size, dtype = np.float32)
if img.shape[2] == 4:
img = img[:, :, :3]
depth = np.reshape(depth, [depth.shape[0], depth.shape[1], 1])
img_orig = img
    img = np.concatenate([img, depth], -1)
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.transforms import Bbox
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
from .error_estimation import poisson_interval
########################################
# Histogram plotter
def histplot(h, bins, weights=None, yerr=None, variances=None,
stack=False, density=False,
histtype='step', label=None, edges=False, binticks=False,
ax=None, **kwargs):
if ax is None:
ax = plt.gca()
else:
if not isinstance(ax, plt.Axes):
raise ValueError("ax must be a matplotlib Axes object")
    # check whether matplotlib is at least 3.3.3 (i.e. has the newer methods)
_mpl_up = np.prod([int(v) >= int(ref)
for v, ref in zip(mpl.__version__.split('.')[:3],
[3, 3, 3])
]).astype(bool)
# arg check
if histtype != 'step':
assert edges is False, "edges is only valid with histtype='step'"
_allowed_histtype = ['fill', 'step', 'errorbar']
_err_message = "Select 'histtype' from: {}".format(_allowed_histtype)
assert histtype in _allowed_histtype, _err_message
# Preprocess
h = np.asarray(h)
    bins = np.asarray(bins)
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% [markdown]
# # SciPy Family
#
# Python-based ecosystem [scipy.org](https://scipy.org)
#
# * SciPy Library - Fundamental library for scientific computing
# * NumPy - Numeric Python: Base N-dimensional array package
# * Pandas - Data structures (Dataframes) & analysis
# * Matplotlib - Comprehensive 2D Plotting
# * Sympy - Symbolic mathematics
# * IPython - Enhanced Interactive Console
#
# The datatypes (dtype attribute) supported by NumPy are many (see the short example after the imports below):
# [Numpy basic data types](https://docs.scipy.org/doc/numpy/user/basics.types.html)
#%%
# might need to install numpy from the terminal
# %pip install numpy
# %pip3 install numpy
# %sudo pip install numpy
# %sudo pip3 install numpy
# %sudo -H pip3 install numpy
# %conda install numpy
# %pip freeze
# %pip list
# %pip show numpy
#%%
import numpy as np
# or from numpy import *
# import matplotlib.pyplot as plt
# import pandas as pd
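#%%
# A minimal illustration of dtypes (example values chosen arbitrarily):
a8 = np.array([1, 2, 3], dtype=np.int8)   # 1 byte per element
a64 = np.array([1.0, 2.0, 3.0])           # defaults to float64, 8 bytes
print(a8.dtype, a8.itemsize)    # int8 1
print(a64.dtype, a64.itemsize)  # float64 8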
#%%
#
# Review lists
list0 = [9,8,7]
list0b = [6,5,4]
#
# What are the lengths of list0 and list0b?
#%%
# What do you get with list0 + list0b?
#
list0+list0b
#%%
# explore data structures with list of list, how many dimensions?
list1 = [ [11,12,13,14], [21,22,23,24], [31,32,33,34]]
list1b = [ [41,42,43,44], [51,52,53,54], [61,62,63,64]]
#
#%%
# Again, what is list1 + list1b?
#
list1+list1b
#%%
# Question: How do you describe (in English) these two lists? What are the "shapes" of the objects?
#
# These are 3 by 4 matrices. Two-dimensional arrays.
#
# Question: how do you get the element '32' in list1?
#
#
#
# Question: how do you get the row of [31,32,33,34] in list1?
#
#
#
#%%
# Question: How do you get the column of 12, 22, 32 ???
#
#
#%%
[ row[1] for row in list1 ]
#%%
# OR Loop it
v3 = []
for row in list1:
v3.append(row[1])
print(v3)
#%%
#%%
list2 = [ [11,12,13], [21,22,23], [31,32,33], [41,42,43] ] # two dimensional list (2-D array) # (4,3)
# list2b = [ [51,52,53], [61,62,63], [71,72,73], [81,82,83]]
# How do you access the different parts of these two lists?
#%%
# How do you create a higher-dimensional list (say 2x3x4)?
#
# list3D = [ [ [111,112,113], [121,122,123], [131,132,133], [141,142,143] ]
# , [ [211,212,213], [221,222,223], [231,232,233], [241,242,243] ] ]
list3D = [ [ [ 111, 112, 113, 114 ], [ 121, 122, 123, 124 ], [131, 132, 133, 134] ] ,
[ [ 211, 212, 213, 214 ], [ 221, 222, 223, 224 ], [231, 232, 233, 234] ] ]
#%%
# Now try numpy
import numpy as np
# Some basic attributes and simple functions of numpy arrays
a = np.arange(15) # numpy creates a range of 15 consecutive integers, like the range() function in basic python
print('a:',a)
a = np.arange(15).reshape(3,-1) # Using -1 for the last dimension lets numpy calculate directly
# a = np.arange(15).reshape(3,5) # Same result as line above
a = np.arange(24).reshape(2,3,-1) # 3d array
print('a:',a)
print('a.shape:',a.shape)
print('a.ndim:',a.ndim)
print('a.dtype:',a.dtype)
print('a.dtype.name:',a.dtype.name)
print('a.itemsize:',a.itemsize)
print('a.size:',a.size)
print('type(a):',type(a))
b = np.array([6, 7, 8])
print('b:',(b))
print('type(b):',type(b))
#
# The opposite of reshape, can use ravel()
print('a ravel:', a.ravel().shape)
print('a again:', a)
#
# IMPORTANT
# The a.ravel() function does NOT change a!!
# It returns a new, flattened array object (a view when the memory layout
# allows; use a.flatten() if you need a guaranteed independent copy)
# and leaves a itself untouched.
# Remember the differences in class/object definitions:
# it is critical what the "return" value of those functions/methods is.
# If a method returned self, you would be getting back the object a;
# these functions return a new array object instead. This is by design.
#
# A lot of other functions in numpy/pandas behave like that too.
# (See the view-vs-copy demo cell below.)
#
# The same thing for reshape, for example
print('a reshape:', a.reshape(1,-1))
print('a.shape:',a.shape)
print('a:',a)
print('#',50*"-")
#%%
# If you really want to change a, try this:
# a = a.ravel() # exact same result as
a = a.reshape(-1)
print('a: ',a)
print('type a: ',type(a))
print('a.shape: ',a.shape)
print('#',50*"-")
#%%
# Other examples to create some simply numpy arrays
print('zeros:', np.zeros( (3,4) ))
print('ones:', np.ones( (2,3,4), dtype=np.int16 ))
print('empty:', np.empty( (2,3) ))
print('arange variation 1:', np.arange( 10, 30, 5 ))
print('arange variation 2:', np.arange( 0, 2, 0.3 ) )
print('complex:', np.array( [ [1,2], [3,4] ], dtype=complex ))
print('float:', np.arange(2, 10, dtype=float) )
from numpy import pi
x = np.linspace( 0, 2*pi, 100 )
f = np.sin(x)
print('sine of linspace:',f)
print('#',50*"-")
#%%
# import numpy as np
# Creating numpy arrays from python lists (and other list-like objects)
# Also look at the concept of "strides"
nparray1 = np.array(list1)
print("nparray1 = \n", nparray1)
print("type(nparray1) =", type(nparray1))
print("nparray1.dtype =", nparray1.dtype) # int64
print("nparray1.shape =", nparray1.shape)
print("nparray1.strides =", nparray1.strides) # each value is int64, hence 8-byte of memory, with four columns, it takes 8x4 = 32 bytes to the next row, same position. Strides = (32,8) to the next row and next column
#%%
# if we redo
nparray1 = np.array(list1, dtype= np.int32)
print("nparray1 = \n", nparray1)
print("type(nparray1) =", type(nparray1))
print("nparray1.dtype =", nparray1.dtype) # int32
print("nparray1.shape =", nparray1.shape)
print("nparray1.strides =", nparray1.strides) # now each value is int32, 4-byte, with four columns, it takes 4x4 = 16 bytes to next row.
#%%
# Try others
nparray2 = np.array(list2)
print("nparray2 = \n", nparray2)
print("type(nparray2) =", type(nparray2))
print("nparray2.dtype =", nparray2.dtype) # int64
print("nparray2.shape =", nparray2.shape)
#%%
import sys
try:
nparray12 = nparray1+nparray2
except ValueError as err : # except (RuntimeError, TypeError, NameError):
print("Value Error: {0}".format(err), " Try transpose...")
nparray12 = nparray1+nparray2.T
except TypeError as err : # except (RuntimeError, TypeError, NameError):
print("Type Error: {0}".format(err))
except:
print("unexpected error:", sys.exc_info()[0])
#%%
list4 = [ 5, 'a', 2, 3.5, True ]
list5 = [ 5, [1,4], 3, 1 ]
nparray4 = np.array(list4)
print("nparray4 = \n", nparray4)
print("type(nparray4) =", type(nparray4))
print("nparray4.dtype =", nparray4.dtype)
print("nparray4.shape =", nparray4.shape)
#%%
# list5 = [ 5, [1,4], 3, 1 ]
nparray5 = np.array(list5)
print("nparray5 = \n", nparray5)
print("type(nparray5) =", type(nparray5))
print("nparray5.dtype =", nparray5.dtype)
print("nparray5.shape =", nparray5.shape)
#%%
# If they are 2D-arrays, and have compatible dimensions, you can multiply them as matrices
tprod12 = np.dot(nparray1,nparray2)
print("tprod12.shape =", tprod12.shape)
mprod21 = np.dot(nparray2,nparray1)
print("mprod21.shape =", mprod21.shape)
#%%
# Also try the 3d-array that we constructed...
# In physics, those are called tensors.
nparray3D = np.array(list3D)
print("nparray3D = \n", nparray3D)
print("type(nparray3D) =", type(nparray3D))
print("nparray3D.dtype =", nparray3D.dtype)
print("nparray3D.shape =", nparray3D.shape)
#%%
# If they are 2D-arrays, and have compatible dimensions, you can multiply them as matrices
tprod32 = np.dot(nparray3D,nparray2)
print("tprod32.shape =", tprod32.shape)
#%%[markdown]
# Speed and ease of use are the strengths of numpy arrays, compared to python lists.
# The entire array must be of a single type, however.
# If we time the code execution, we will find that vectorized numpy functions
# are much faster than looping through a python list.
# This is mainly because NumPy is written in C and optimizes these specialized
# operations in a well-designed library. A small timing sketch follows below.
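#%%
# A small timing sketch (absolute numbers will vary by machine):
import time
n = 1_000_000
xs = list(range(n))
t0 = time.perf_counter()
ys = [v * 2 for v in xs]   # pure-python list comprehension
t1 = time.perf_counter()
arr = np.arange(n)
t2 = time.perf_counter()
arr2 = arr * 2             # vectorized numpy broadcast
t3 = time.perf_counter()
print(f"list comprehension: {t1 - t0:.4f}s, numpy: {t3 - t2:.4f}s")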
#%%
# filtering and indexing
print(nparray1[0:2,:2],'\n')
print(nparray1[:,-1:])
#%%
# Let us do something simpler.
# Obtain the third column of nparray1
print(nparray1)
v3 = nparray1[:,2]
print(v3) # it is a column vector, or array one by three (3,1)
print(v3.shape) # it is a column vector, or array one by three (3,1)
# Much easier than dealing with lists on the coding side of things. Speed is also maximized.
#%%
# BROADCASTING
#
# Let's practice slicing numpy arrays and using NumPy's broadcasting concept.
# Remember, broadcasting refers to a numpy array's ability to VECTORIZE operations,
# so they are performed on all elements of an object at once.
# If you need to perform some simple operations on all array elements,
#
nparray1squared = nparray1 ** 2
print(nparray1squared)
#%%
nparray1mod7 = nparray1 % 7 # remainder from dividing by 7
print(nparray1mod7)
#%%
nparray1b = np.array(list1b)
nparray1bovera = nparray1b / nparray1
print(nparray1bovera)
# Try some other operations, see if they work.
# Next try to do the above with loops or comprehensions?
#%%
# boolean indexing
print(nparray1)
npbool1greater = nparray1 > 21
print(npbool1greater)
#%%
print(nparray1[npbool1greater])
#%%
print(nparray1[npbool1greater].shape)
#%%
npbool1mod = nparray1 %2 ==1
print(npbool1mod)
print(nparray1[npbool1mod])
print(nparray1[npbool1mod].shape)
# Again, try to do these with loops or comprehensions?
#%%
# Let us look at filtering again.
x = np.arange(10)
import numpy as np
import numba
import math
def dtft(x, omegas):
"""
    Exact evaluation of the DTFT at the indicated points omega for the signal x.
    Note this is incredibly slow (O(len(omegas) * N)).
    Note x runs from 0 to N-1.
"""
N = len(x)
ns = np.arange(N)
W = np.zeros((len(omegas), N), dtype=np.complex128)
for wi, w in enumerate(omegas):
W[wi, :] = np.exp(-1.0j * w * ns)
return np.dot(W, x)
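# Sanity check (illustrative): at the N uniformly spaced frequencies
# 2*pi*k/N the DTFT reduces to the DFT, so this should match np.fft.fft:
#   x = np.random.randn(32)
#   omegas = 2 * np.pi * np.arange(32) / 32
#   np.allclose(dtft(x, omegas), np.fft.fft(x))  # -> True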
@numba.jit(nopython=True)
def nextpow2(n):
"""
Return the smallest power of two greater than or equal to n.
"""
return int(math.ceil(math.log(n)/math.log(2)))
# now try ourselves a chirp-z transform
@numba.jit
def chirpz(x, M, A, W):
"""
chirp z transform per Rabiner derivation pp1256
x is our (complex) signal of length N
"""
N = len(x)
    L = 2**(nextpow2(N + M - 1))  # smallest power of two >= N + M - 1
yn = np.zeros(L, dtype=np.complex128)
for n in range(N):
yn_scale = A**(-n) * W**((n**2.0)/2.0)
yn[n] = x[n] * yn_scale
Yr = np.fft.fft(yn)
vn = np.zeros(L, dtype=np.complex128)
for n in range(M):
vn[n] = W**((-n**2.0)/2.0)
for n in range(L-N+1, L):
vn[n] = W**(-((L-n)**2.0)/2.0)
Vr = np.fft.fft(vn)
Gr = Yr * Vr
gk = np.fft.ifft(Gr)
#gk = np.convolve(yn, vn)
Xk = np.zeros(M, dtype=np.complex128)
for k in range(M):
g_scale = W**((k**2.0)/2.0)
Xk[k] = g_scale * gk[k]
return Xk
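# Illustrative check: with A = 1 and W = exp(-2j*pi/N), M = N, the chirp-z
# transform reduces to the ordinary DFT:
#   x = np.random.randn(64) + 0j
#   X = chirpz(x, 64, 1.0 + 0j, np.exp(-2j * np.pi / 64))
#   np.allclose(X, np.fft.fft(x))  # -> True up to float tolerance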
@numba.jit
def chirpz2d(x, M, A, W):
N = len(x)
x = x.T
out = np.zeros((N, M), dtype=np.complex128)
for i in range(N):
out[i] = chirpz(x[i], M, A, W)
out2d = np.zeros((M, M), dtype=np.complex128)
for i in range(M):
out2d[i] = chirpz(out[:, i], M, A, W)
return out2d
@numba.jit
def fchirpz2d(x, M, A, W):
"""
chirp z transform per Rabiner derivation pp1256
x is our (complex) signal of length N
assume x is square, output M will be square, dims are the same on all sides
"""
N = len(x)
    L = 2**(nextpow2(N + M - 1))  # smallest power of two >= N + M - 1
yn = np.zeros((L, L), dtype=np.complex128)
    ns = np.arange(N)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 16:29:08 2020
@author: L.I.Vazquez-Salazar
@email: <EMAIL>
Class to compute the KL divergence for two distributions.
"""
import numpy as np
import pandas as pd
from scipy import stats
from scipy import integrate
class Metrics:
def __init__(self,df,df1):
self.df = df
self.df1 = df1
def get_data(self,key,key1):
'''
        Read the bond-length columns of the reference and target databases
        Parameters
        ----------
        key : Key describing the values of the bond length for the reference
              database
        key1 : Key describing the values of the bond length for the target
               database
        Returns
        -------
        dp : Values of the bond lengths for reference and target.
'''
data = self.df[key]
data_clean = data.dropna()
data1 = self.df1[key1]
data1_clean = data1.dropna()
dp = [data_clean, data1_clean]
return dp
def support_array(self,array,grid_size=1000):
'''
        Creates an array for the generation of the Gaussian kernel distribution
        from the minimum value of the array to the maximum. The size of the grid
        can be modified.
        Parameters
        ----------
        array : Array of values used for the creation of the kernel distribution.
        grid_size : int, optional. Number of points used for the array.
                    The default is 1000.
        Returns
        -------
        sup_arr : Array of values for the evaluation of the Gaussian kernel distribution.
'''
v_min = np.min(array)-1
        v_max = np.max(array)
        sup_arr = np.linspace(v_min, v_max, grid_size)
        return sup_arr
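    # The KL computation itself is not part of this excerpt. A plausible
    # sketch (assumed, not from the original source) of how the pieces above
    # combine, using a Gaussian KDE per database and Simpson integration:
    #   p = stats.gaussian_kde(dp[0])(sup_arr)
    #   q = stats.gaussian_kde(dp[1])(sup_arr)
    #   kl = integrate.simpson(p * np.log(p / q), sup_arr)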
#!/usr/bin/env python
#
"""
This script visualizes how well SIFT features are matched between
an image and a camera frame.
Usage
-----
$ roslaunch roseus_tutorials usb-camera.launch
$ roslaunch jsk_2015_05_baxter_apc sift_matcher_for_imgs.launch
$ rosrun image_view image_view image:=/sift_matcher_for_imgs/output
"""
import os
import cv2
import numpy as np
import rospy
import cv_bridge
from sensor_msgs.msg import Image
from posedetection_msgs.srv import Feature0DDetect
from sift_matcher import SiftMatcher, imgsift_client
from common import load_img
class ImageSubscriber(object):
def __init__(self, image_topic):
rospy.Subscriber(image_topic, Image, self._cb_img)
rospy.loginfo('Waiting for: {topic}'.format(topic=image_topic))
rospy.wait_for_message(image_topic, Image)
rospy.loginfo('Found: {topic}'.format(topic=image_topic))
def _cb_img(self, msg):
"""Callback function of Subscribers to listen Image"""
bridge = cv_bridge.CvBridge()
self.stamp = msg.header.stamp
self.img = bridge.imgmsg_to_cv2(msg)
class SiftMatcherOneImg(SiftMatcher):
"""Compare two images.
Usually camera image (input) with static image (reference)"""
def __init__(self):
super(SiftMatcherOneImg, self).__init__()
self.img_sub = ImageSubscriber('~input')
self.reference_sub = ImageSubscriber('~input/reference')
self.pub = rospy.Publisher('~output', Image, queue_size=1)
def match(self):
input_stamp, input_img = self.img_sub.stamp, self.img_sub.img
input_features = self.query_features
reference_img = self.reference_sub.img
reference_features = imgsift_client(reference_img)
matches = self.find_match(input_features.descriptors,
reference_features.descriptors)
rospy.loginfo('matches: {}'.format(len(matches)))
# prepare output img
matched_img = drawMatches(input_img, input_features.positions,
reference_img, reference_features.positions,
matches)
cv2.putText(matched_img, 'matches: {}'.format(len(matches)),
(5, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
self.publish_img(stamp=input_stamp, img=matched_img)
def publish_img(self, stamp, img, encoding='bgr8'):
bridge = cv_bridge.CvBridge()
img_msg = bridge.cv2_to_imgmsg(img, encoding=encoding)
img_msg.header.stamp = stamp
self.pub.publish(img_msg)
def drawMatches(query_img, query_pos, train_img, train_pos, matches):
"""Draw match points for two images"""
query_img = cv2.cvtColor(query_img, cv2.COLOR_RGB2GRAY)
train_img = cv2.cvtColor(train_img, cv2.COLOR_RGB2GRAY)
query_pos = np.array(query_pos).reshape((-1, 2))
    train_pos = np.array(train_pos).reshape((-1, 2))
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl2 DS3
I_CDF = DV_INJ_NC[1].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# REP
assert len(A._ID_dict['non-collapse']) == len(A._ID_dict['repairable'])
assert len(A._ID_dict['irreparable']) == 0
# cost
DV_COST = A._DV_dict['rec_cost']
# DS1
C_CDF = DV_COST.iloc[:, 0]
C_CDF = np.around(C_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 2500], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
C_CDF = DV_COST.iloc[:, 1]
C_CDF = np.around(C_CDF / 100., decimals=0) * 100.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 25000], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
C_CDF = DV_COST.iloc[:, 2]
C_CDF = np.around(C_CDF / 1000., decimals=0) * 1000.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 250000], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# time
DV_TIME = A._DV_dict['rec_time']
# DS1
T_CDF = DV_TIME.iloc[:, 0]
T_CDF = np.around(T_CDF, decimals=1)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 2.5], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
T_CDF = DV_TIME.iloc[:, 1]
T_CDF = np.around(T_CDF, decimals=0)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 25], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
T_CDF = DV_TIME.iloc[:, 2]
T_CDF = np.around(T_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 250], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert_allclose(S[('event time', 'month')], A._TIME['month'] + 1)
assert_allclose(S[('event time', 'weekday?')], A._TIME['weekday?'])
assert_allclose(S[('event time', 'hour')], A._TIME['hour'])
assert_allclose(S[('inhabitants', '')], A._POP.iloc[:, 0])
assert SD.loc[('collapses', 'collapsed'), 'mean'] == pytest.approx(0.5,
rel=0.05)
assert SD.loc[('collapses', 'mode'), 'mean'] == 0.
assert SD.loc[('collapses', 'mode'), 'count'] == pytest.approx(5000,
rel=0.05)
assert SD.loc[('red tagged', ''), 'mean'] == pytest.approx(0.5, rel=0.05)
assert SD.loc[('red tagged', ''), 'count'] == pytest.approx(5000, rel=0.05)
for col in ['irreparable', 'cost impractical', 'time impractical']:
assert SD.loc[('reconstruction', col), 'mean'] == 0.
assert SD.loc[('reconstruction', col), 'count'] == pytest.approx(5000,
rel=0.05)
RC = deepcopy(S.loc[:, ('reconstruction', 'cost')])
RC_CDF = np.around(RC / 1000., decimals=0) * 1000.
vals, counts = np.unique(RC_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]) * 1000.)
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
RT = deepcopy(S.loc[:, ('reconstruction', 'time-parallel')])
RT_CDF = np.around(RT, decimals=0)
vals, counts = np.unique(RT_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]))
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
assert_allclose(S.loc[:, ('reconstruction', 'time-parallel')],
S.loc[:, ('reconstruction', 'time-sequential')])
CAS = deepcopy(S.loc[:, ('injuries', 'sev1')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.075, 0.15, 0.25, 0.3, 0.5, 1.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2, 2.5, 7, 5]) / 56., atol=0.01,
rtol=0.1)
CAS = deepcopy(S.loc[:, ('injuries', 'sev2')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.025, 0.05, 0.1, 2.25, 4.5, 9.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2.5, 2, 7, 5]) / 56., atol=0.01,
rtol=0.1)
def test_FEMA_P58_Assessment_EDP_uncertainty_basic():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_2.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_2.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
assert_allclose(thetas, [9.80665, 12.59198, 0.074081, 0.044932], rtol=0.02)
assert_allclose(betas, [0.25, 0.25, 0.3, 0.4], rtol=0.02)
rho = RV_EDP[0].RV_set.Rho()
rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
assert_allclose(rho, rho_target, atol=0.05)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer(
[0.3, 0.4], [0.3, 0.4]),
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000. for i in
range(8)]
DMG_1_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.1]))[
0]
DMG_2_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.1, 0.1]))[
0]
DMG_1_PFA = mvn_od(np.log([0.074081, 9.80665]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
DMG_2_PFA = mvn_od(np.log([0.074081, 12.59198]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert DMG_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert DMG_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert DMG_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021 and 1022
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2011 and 2012
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2021 and 2022
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 9.80665]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert RED_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert RED_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert RED_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log([0.074081, 0.044932, 9.80665, 12.59198]),
np.array(
[[1.0, 0.7, 0.3, 0.3], [0.7, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.6],
[0.3, 0.3, 0.6, 1.0]]) * np.outer(
[0.3, 0.4, 0.25, 0.25],
[0.3, 0.4, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[0.05488, 0.05488, 9.80665, 9.80665]))[0]
S = A._SUMMARY
SD = S.describe().T
    P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
        ('red tagged', ''), 'count'] / 10000.
    assert P_no_RED_target == pytest.approx(P_no_RED_test, rel=0.10)
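# The probability targets above and below are produced by mvn_od(), which
# integrates a multivariate normal over a box in log space. The sketch below
# is a Monte Carlo stand-in added for illustration only: the name, signature,
# and 1-tuple return are assumptions mimicking the call sites, not the actual
# implementation used by this test suite.
def mvn_od_mc_sketch(mu_log, cov, lower=None, upper=None,
                     n_samples=200000, seed=0):
    """Estimate P(lower <= X <= upper) for X ~ MVN(mu_log, cov) by sampling."""
    rng = np.random.default_rng(seed)
    dim = len(mu_log)
    lower = np.full(dim, -np.inf) if lower is None else np.asarray(lower)
    upper = np.full(dim, np.inf) if upper is None else np.asarray(upper)
    samples = rng.multivariate_normal(mu_log, cov, size=n_samples)
    inside = np.all((samples >= lower) & (samples <= upper), axis=1)
    # 1-tuple so that mvn_od_mc_sketch(...)[0] matches the mvn_od call sites
    return (inside.mean(),)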
def test_FEMA_P58_Assessment_EDP_uncertainty_detection_limit():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
This test differs from the basic case in having unreliable EDP values above
a certain limit - a typical feature of interstory drifts in dynamic
simulations. Such cases should not be a problem if the limits can be
    estimated and they are specified as detection limits in the input file.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_3.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_3.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:, 2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
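# The test above relies on the EDP fitting routine to handle detection limits
# (censored observations). As a hedged, one-dimensional illustration of that
# idea -- not the package's actual fitting code -- a censored lognormal fit
# can be written with scipy; the function name, the `det_lim` argument, and
# the optimizer settings are assumptions made for this sketch.
def fit_lognormal_censored_sketch(samples, det_lim):
    """Return (theta, beta) from a censored maximum-likelihood fit."""
    from scipy.optimize import minimize
    from scipy.stats import norm
    log_x = np.log(samples)
    log_dl = np.log(det_lim)
    observed = log_x[log_x < log_dl]   # reliable values below the limit
    n_cens = np.sum(log_x >= log_dl)   # values only known to exceed the limit

    def neg_loglik(params):
        mu, sig = params
        ll = np.sum(norm.logpdf(observed, mu, sig))
        ll += n_cens * norm.logsf(log_dl, mu, sig)  # censored contribution
        return -ll

    res = minimize(neg_loglik, x0=[observed.mean(), observed.std()],
                   bounds=[(None, None), (1e-6, None)])
    mu, sig = res.x
    return np.exp(mu), sig  # lognormal median (theta) and dispersion (beta)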
def test_FEMA_P58_Assessment_EDP_uncertainty_failed_analyses():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
Here we use EDP results with unique values assigned to failed analyses.
In particular, PID=1.0 and PFA=100.0 are used when an analysis fails.
These values shall be handled by detection limits of 10 and 100 for PID
and PFA, respectively.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_4.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_4.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:,2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:,2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
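# prob_approx() and prob_allclose() are used throughout these tests but are
# defined elsewhere in the suite. The sketches below show one plausible
# reading -- absolute-tolerance comparisons suited to sampled probabilities --
# and are illustrative assumptions, not the suite's actual helpers.
def prob_approx_sketch(p_target, tol):
    """pytest.approx with an absolute tolerance for probability estimates."""
    return pytest.approx(p_target, abs=tol)

def prob_allclose_sketch(p_target, p_test, tol):
    """Element-wise absolute-tolerance check for probability arrays."""
    assert_allclose(p_target, p_test, atol=tol)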
def test_FEMA_P58_Assessment_EDP_uncertainty_3D():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we look at the propagation of EDP values provided for two
different directions. (3D refers to the numerical model used for response
estimation.)
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_5.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_5.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 8.65433, 12.59198, 11.11239,
0.074081, 0.063763, 0.044932, 0.036788]
EDP_beta_target = [0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.4, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.array([
[1.0, 0.8, 0.6, 0.5, 0.3, 0.3, 0.3, 0.3],
[0.8, 1.0, 0.5, 0.6, 0.3, 0.3, 0.3, 0.3],
[0.6, 0.5, 1.0, 0.8, 0.3, 0.3, 0.3, 0.3],
[0.5, 0.6, 0.8, 1.0, 0.3, 0.3, 0.3, 0.3],
[0.3, 0.3, 0.3, 0.3, 1.0, 0.8, 0.7, 0.6],
[0.3, 0.3, 0.3, 0.3, 0.8, 1.0, 0.6, 0.7],
[0.3, 0.3, 0.3, 0.3, 0.7, 0.6, 1.0, 0.8],
[0.3, 0.3, 0.3, 0.3, 0.6, 0.7, 0.8, 1.0]])
large_rho_ids = np.where(EDP_rho_target >= 0.5)
small_rho_ids = np.where(EDP_rho_target < 0.5)
assert_allclose(EDP_rho_test[large_rho_ids], EDP_rho_target[large_rho_ids],
atol=0.1)
assert_allclose(EDP_rho_test[small_rho_ids], EDP_rho_target[small_rho_ids],
atol=0.2)
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
theta_PID = np.log(EDP_theta_target[4:])
COV_PID = EDP_COV_test[4:, 4:]
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(theta_PID, COV_PID,
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1, abs=0.05)
# DMG
realization_count = float(A._AIM_in['general']['realizations'])
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / realization_count for i in
range(8)]
DMG_1_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 9.80665, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 9.80665,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_ref = [DMG_1_1_PID, DMG_1_2_PID, DMG_2_1_PID, DMG_2_2_PID,
DMG_1_1_PFA, DMG_1_2_PFA, DMG_2_1_PFA, DMG_2_2_PFA]
assert_allclose(DMG_check, DMG_ref, rtol=0.10, atol=0.01)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 249., 624., 1251., 1875.]
T_target = [0., 0.249, 0.624, 1.251, 1.875]
# PG 1011
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 0].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 0].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1012
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 1].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 1].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.05488, 0.1, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 2].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 2].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1022
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.05488, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 3].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 5)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 3].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 5)]
P_test = P_test[np.where(P_test > 5)]
P_test = P_test / realization_count
assert_allclose(P_target[:-1], P_test[:4], atol=0.05)
assert_allclose(C_target[:-1], C_test[:4], rtol=0.001)
assert_allclose(T_target[:-1], T_test[:4], rtol=0.001)
# PG 2011
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 4].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 4].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 5].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 5].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target[:4], P_test[:4], atol=0.05)
assert_allclose(C_target[:4], C_test[:4], rtol=0.001)
assert_allclose(T_target[:4], T_test[:4], rtol=0.001)
# PG 2021
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 6].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 6].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 7].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 7].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / realization_count).values
assert_allclose(RED_check, DMG_ref, atol=0.02, rtol=0.10)
DMG_on = | np.where(A._DMG > 0.0) | numpy.where |
"""
Several helper functions for calculating pairwise quantities.
Original code in this file from IDP Conformer Generator package
(https://github.com/julie-forman-kay-lab/IDPConformerGenerator)
developed by João M. C. Teixeira (@joaomcteixeira), and added to the
MSCCE repository in commit 30e417937968f3c6ef09d8c06a22d54792297161.
"""
import numpy as np
import numba as nb
from numba import jit
@jit(nb.float64[:,:](nb.float32[:,:,:]), nopython=True, nogil=True)
def calc_all_vs_all_dists(coords):
"""
Calculate the upper half of all vs. all distances for a batch.
Reproduces the operations of scipy.spatial.distance.pdist.
Parameters
----------
    coords : np.ndarray, shape (B, N, 3), dtype=np.float32, B: batch size
    Returns
    -------
    np.ndarray, shape (B, (N * N - N) // 2,), dtype=np.float64
"""
batch_size = coords.shape[0]
len_ = coords.shape[1]
shape = (batch_size, (len_ * len_ - len_) // 2,)
results = np.empty(shape, dtype=np.float64)
for bi in range(batch_size):
c = 1
i = 0
for a in coords[bi]:
for b in coords[bi,c:]:
x = b[0] - a[0]
y = b[1] - a[1]
z = b[2] - a[2]
results[bi, i] = (x * x + y * y + z * z) ** 0.5
i += 1
c += 1
return results
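# Hedged usage sketch for calc_all_vs_all_dists above: verify a random batch
# against scipy.spatial.distance.pdist, which it reproduces. The demo function
# is illustrative only and not part of the original module.
def _demo_calc_all_vs_all_dists():
    from scipy.spatial.distance import pdist
    rng = np.random.default_rng(42)
    coords = rng.random((2, 6, 3)).astype(np.float32)
    batch_dists = calc_all_vs_all_dists(coords)
    for bi in range(coords.shape[0]):
        # float32 inputs limit the agreement to ~1e-5 absolute tolerance
        assert np.allclose(batch_dists[bi],
                           pdist(coords[bi].astype(np.float64)), atol=1e-5)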
@jit(nb.float64[:,:](nb.float32[:,:,:], nb.float32[:,:,:]), nopython=True, nogil=True)
def calc_new_vs_old_dists(coords_new, coords_old):
"""
First calculate the upper half all-vs-all distances for coords_new,
and then calculate all-new vs all-old distances
"""
batch_size = coords_new.shape[0]
len_new = coords_new.shape[1]
len_old = coords_old.shape[1]
shape = (batch_size, (len_new * len_new - len_new) // 2 + len_new * len_old,)
results = np.empty(shape, dtype=np.float64)
for bi in range(batch_size):
c = 1
i = 0
# new coord block
for a in coords_new[bi]:
for b in coords_new[bi,c:]:
x = b[0] - a[0]
y = b[1] - a[1]
z = b[2] - a[2]
results[bi, i] = (x * x + y * y + z * z) ** 0.5
i += 1
c += 1
# new-old coord block
for a in coords_new[bi]:
for b in coords_old[bi]:
x = b[0] - a[0]
y = b[1] - a[1]
z = b[2] - a[2]
results[bi, i] = (x * x + y * y + z * z) ** 0.5
i += 1
return results
@jit(nb.void(nb.float64[:],nb.float64[:]), nopython=True, nogil=True)
def sum_upper_diagonal_raw(data, result):
"""
Calculate outer sum for upper diagonal with for loops.
    The use of for-loop based calculation avoids the creation of very
    large arrays using numpy outer derivatives. This function is thought
    to be jit compiled.
Does not create new data structure. It requires the output structure
to be provided. Hence, modifies in place. This was decided so
because this function is thought to be jit compiled and errors with
the creation of very large arrays were rising. By passing the output
array as a function argument, errors related to memory freeing are
avoided.
Parameters
----------
    data : an iterable of Numbers, of length N
    result : a mutable sequence, either list or np.ndarray,
        of length N*(N-1)//2
"""
c = 0
len_ = len(data)
for i in range(len_ - 1):
for j in range(i + 1, len_):
result[c] = data[i] + data[j]
c += 1
# assert result.size == (data.size * data.size - data.size) // 2
# assert abs(result[0] - (data[0] + data[1])) < 0.0000001
# assert abs(result[-1] - (data[-2] + data[-1])) < 0.0000001
return
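# Hedged usage sketch for sum_upper_diagonal_raw above: the caller must
# preallocate the N*(N-1)//2 output, which the function then fills in place.
# Illustrative only; not part of the original module.
def _demo_sum_upper_diagonal_raw():
    data = np.array([1.0, 2.0, 3.0])
    result = np.empty(len(data) * (len(data) - 1) // 2)
    sum_upper_diagonal_raw(data, result)
    # upper-triangle pairs: 1+2, 1+3, 2+3
    assert np.allclose(result, [3.0, 4.0, 5.0])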
@jit(nb.void(nb.float64[:],nb.float64[:],nb.float64[:]), nopython=True, nogil=True)
def sum_partial_upper_diagonal(data_new, data_old, result):
sum_upper_diagonal_raw(data_new, result)
c = len(data_new) * (len(data_new) - 1) // 2
for i in range(len(data_new)):
for j in range(len(data_old)):
result[c] = data_new[i] + data_old[j]
c += 1
return
@jit(nb.void(nb.float64[:],nb.float64[:]), nopython=True, nogil=True)
def multiply_upper_diagonal_raw(data, result):
"""
Calculate the upper diagonal multiplication with for loops.
The use of for-loop based calculation avoids the creation of very
large arrays using numpy outer derivatives. This function is thought
to be njit compiled.
Does not create new data structure. It requires the output structure
to be provided. Hence, modifies in place. This was decided so
because this function is thought to be jit compiled and errors with
the creation of very large arrays were rising. By passing the output
array as a function argument, errors related to memory freeing are
avoided.
Parameters
----------
    data : an iterable of Numbers, of length N
    result : a mutable sequence, either list or np.ndarray,
        of length N*(N-1)//2
"""
c = 0
len_ = len(data)
for i in range(len_ - 1):
for j in range(i + 1, len_):
result[c] = data[i] * data[j]
c += 1
@jit(nb.void(nb.float64[:],nb.float64[:],nb.float64[:]), nopython=True, nogil=True)
def multiply_partial_upper_diagonal(data_new, data_old, result):
multiply_upper_diagonal_raw(data_new, result)
c = len(data_new) * (len(data_new) - 1) // 2
for i in range(len(data_new)):
for j in range(len(data_old)):
result[c] = data_new[i] * data_old[j]
c += 1
return
@jit(nopython=True, nogil=True)
def calc_angle_coords(
coords,
ARCCOS=np.arccos,
DOT=np.dot,
NORM=np.linalg.norm,
    ):
    """Calculate the angle at coords[1] formed by the three points in coords."""
# https://stackoverflow.com/questions/2827393/
v1 = coords[0] - coords[1]
v2 = coords[2] - coords[1]
return calc_angle(v1, v2)
@jit(nopython=True, nogil=True)
def calc_angle(
v1, v2,
ARCCOS=np.arccos,
DOT=np.dot,
NORM=np.linalg.norm,
):
"""Calculate the angle between two vectors."""
# https://stackoverflow.com/questions/2827393/
v1_u = v1 / NORM(v1)
v2_u = v2 / NORM(v2)
dot_ncan = np.dot(v1_u, v2_u)
if dot_ncan < -1.0:
dot_ncan_clean = -1.0
elif dot_ncan > 1.0:
dot_ncan_clean = 1.0
else:
dot_ncan_clean = dot_ncan
return ARCCOS(dot_ncan_clean)
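# Hedged usage sketch for calc_angle above: perpendicular unit vectors give
# an angle of pi/2 radians. Illustrative only; not part of the original module.
def _demo_calc_angle():
    v1 = np.array([1.0, 0.0, 0.0])
    v2 = np.array([0.0, 1.0, 0.0])
    assert abs(calc_angle(v1, v2) - np.pi / 2) < 1e-12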
# @njit
def calc_torsion_angles(
coords,
ARCTAN2=np.arctan2,
CROSS=np.cross,
DIAGONAL=np.diagonal,
MATMUL=np.matmul,
NORM=np.linalg.norm,
):
"""
Calculate torsion angles from sequential coordinates.
Uses ``NumPy`` to compute angles in a vectorized fashion.
Sign of the torsion angle is also calculated.
Uses Prof. Azevedo implementation:
https://azevedolab.net/resources/dihedral_angle.pdf
Example
-------
Given the sequential coords that represent a dummy molecule of
four atoms:
>>> xyz = numpy.array([
>>> [0.06360, -0.79573, 1.21644],
>>> [-0.47370, -0.10913, 0.77737],
>>> [-1.75288, -0.51877, 1.33236],
>>> [-2.29018, 0.16783, 0.89329],
>>> ])
    A1---A2
           \
            \
             A3---A4
Calculates the torsion angle in A2-A3 that would place A4 in respect
to the plane (A1, A2, A3).
Likewise, for a chain of N atoms A1, ..., An, calculates the torsion
angles in (A2, A3) to (An-2, An-1). (A1, A2) and (An-1, An) do not
have torsion angles.
If coords represent a protein backbone consisting of N, CA, and C
atoms and starting at the N-terminal, the torsion angles are given
by the following slices to the resulting array:
- phi (N-CA), [2::3]
- psi (CA-C), [::3]
- omega (C-N), [1::3]
Parameters
----------
coords : numpy.ndarray of shape (N>=4, 3)
Where `N` is the number of atoms, must be equal or above 4.
Returns
-------
numpy.ndarray of shape (N - 3,)
The torsion angles in radians.
If you want to convert those to degrees just apply
``np.degrees`` to the returned result.
"""
# requires
assert coords.shape[0] > 3
assert coords.shape[1] == 3
crds = coords.T
# Yes, I always write explicit array indices! :-)
q_vecs = crds[:, 1:] - crds[:, :-1]
cross = CROSS(q_vecs[:, :-1], q_vecs[:, 1:], axis=0)
unitary = cross / NORM(cross, axis=0)
# components
# u0 comes handy to define because it fits u1
u0 = unitary[:, :-1]
# u1 is the unitary cross products of the second plane
# that is the unitary q2xq3, obviously applied to the whole chain
u1 = unitary[:, 1:]
# u3 is the unitary of the bonds that have a torsion representation,
# those are all but the first and the last
u3 = q_vecs[:, 1:-1] / NORM(q_vecs[:, 1:-1], axis=0)
# u2
# there is no need to further select dimensions for u2, those have
# been already sliced in u1 and u3.
u2 = CROSS(u3, u1, axis=0)
# calculating cos and sin of the torsion angle
# here we need to use the .T and np.diagonal trick to achieve
# broadcasting along the whole coords chain
# np.matmul is preferred to np.dot in this case
# https://numpy.org/doc/stable/reference/generated/numpy.matmul.html
cos_theta = DIAGONAL(MATMUL(u0.T, u1))
sin_theta = DIAGONAL(MATMUL(u0.T, u2))
# torsion angles
return -ARCTAN2(sin_theta, cos_theta)
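# Hedged usage sketch for calc_torsion_angles above, reusing the dummy
# four-atom molecule from the docstring. Illustrative only; not part of the
# original module.
def _demo_calc_torsion_angles():
    xyz = np.array([
        [0.06360, -0.79573, 1.21644],
        [-0.47370, -0.10913, 0.77737],
        [-1.75288, -0.51877, 1.33236],
        [-2.29018, 0.16783, 0.89329],
    ])
    torsions = calc_torsion_angles(xyz)
    assert torsions.shape == (1,)           # N - 3 torsion angles
    assert -np.pi <= torsions[0] <= np.pi   # returned in radians
    print(np.degrees(torsions))             # convert to degrees if preferred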
@jit(nopython=True, nogil=True)
def norm_along_last_axis(array):
# calculate norm along the last axis and keep the dimension
original_shape = array.shape
flattened_to_last_axis = np.reshape(array, (-1, original_shape[-1]))
result = np.empty((flattened_to_last_axis.shape[0], 1))
for i in range(flattened_to_last_axis.shape[0]):
result[i] = np.sum(flattened_to_last_axis[i] ** 2) ** (1/2)
return result.reshape(original_shape[: -1] + (1, ))
@jit(nopython=True, nogil=True)
def dot_along_last_axis(array1, array2):
# dot product along the last axis and keep the dimension
result = np.empty(len(array1))
for i in range(len(array1)):
result[i] = np.dot(array1[i], array2[i])
return result
@jit(nopython=True, nogil=True)
def calc_proper_torsions(coords):
"""
A vectorized and jitted version for calculating a set of proper torsion angle values
Params
----------
coords: np.array with shape Bx4x3
B is the batch size, 4 are the 4 atoms in each batch for defining the dihedral angle,
and 3 are x,y,z coordinates
Returns
----------
result: np.array with shape B
The proper torsion angles for each batch element, in units of radian
"""
# coords: Bx4x3
q_vecs = coords[:, 1:, :] - coords[:, :-1, :] # Bx3x3
cross = np.cross(q_vecs[:, :-1, :], q_vecs[:, 1:, :])
unitary = cross / norm_along_last_axis(cross) # Bx2x3
u0 = unitary[:, 0, :]
u1 = unitary[:, 1, :]
u3 = q_vecs[:, 1, :] / norm_along_last_axis(q_vecs[:, 1, :] * 1) # strange bug: have to multiply by 1, otherwise numba does not compile
u2 = np.cross(u3, u1)
cos_theta = dot_along_last_axis(u0, u1)
sin_theta = dot_along_last_axis(u0, u2)
result = -np.arctan2(sin_theta, cos_theta)
return result
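# Hedged consistency sketch: on a single four-atom window, the batched and
# jitted calc_proper_torsions above should agree with calc_torsion_angles.
# Illustrative only; not part of the original module.
def _demo_calc_proper_torsions():
    xyz = np.array([
        [0.06360, -0.79573, 1.21644],
        [-0.47370, -0.10913, 0.77737],
        [-1.75288, -0.51877, 1.33236],
        [-2.29018, 0.16783, 0.89329],
    ])
    batched = calc_proper_torsions(xyz.reshape(1, 4, 3))
    assert np.allclose(batched, calc_torsion_angles(xyz))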
def calc_improper_torsion_angles():
pass
@jit(nopython=True, nogil=True)
def hamiltonian_multiplication_Q(a1, b1, c1, d1, a2, b2, c2, d2):
"""Hamiltonian Multiplication."""
return (
(a1 * a2) - (b1 * b2) - (c1 * c2) - (d1 * d2),
(a1 * b2) + (b1 * a2) + (c1 * d2) - (d1 * c2),
(a1 * c2) - (b1 * d2) + (c1 * a2) + (d1 * b2),
(a1 * d2) + (b1 * c2) - (c1 * b2) + (d1 * a2),
)
@jit(nopython=True, nogil=True)
def rotate_coordinates_Q(
coords,
rot_vec,
angle_rad,
ARRAY=np.array,
HMQ=hamiltonian_multiplication_Q,
VSTACK=np.vstack,
):
"""
Rotate coordinates by radians along an axis.
Rotates using quaternion operations.
Parameters
----------
coords : nd.array (N, 3), dtype=np.float64
The coordinates to rotate.
rot_vec : (,3)
A 3D space vector around which to rotate coords.
Rotation vector **must** be a unitary vector.
angle_rad : float
The angle in radians to rotate the coords.
Returns
-------
nd.array shape (N, 3), dtype=np.float64
The rotated coordinates
"""
# assert coords.shape[1] == 3
b2, b3, b4 = np.sin(angle_rad / 2) * rot_vec
b1 = np.cos(angle_rad / 2)
c1, c2, c3, c4 = HMQ(
b1, b2, b3, b4,
0, coords[:, 0], coords[:, 1], coords[:, 2],
)
_, d2, d3, d4 = HMQ(
c1, c2, c3, c4,
b1, -b2, -b3, -b4,
)
rotated = VSTACK((d2, d3, d4)).T
assert rotated.shape[1] == 3
return rotated
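# Hedged usage sketch for rotate_coordinates_Q above: a 90-degree rotation
# about the z axis maps the x unit vector onto the y unit vector.
# Illustrative only; not part of the original module.
def _demo_rotate_coordinates_Q():
    coords = np.array([[1.0, 0.0, 0.0]])
    z_axis = np.array([0.0, 0.0, 1.0])
    rotated = rotate_coordinates_Q(coords, z_axis, np.pi / 2)
    assert np.allclose(rotated, [[0.0, 1.0, 0.0]], atol=1e-12)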
@jit(nopython=True, nogil=True)
def place_sidechain_template(
bb_cnf,
ss_template,
CROSS=np.cross,
NORM=np.linalg.norm,
):
"""
Place sidechain templates on backbone.
Sidechain residue template is expected to have CA already at 0,0,0.
Parameters
----------
bb_cnf : numpy nd.array, shape (3, 3), dtype=float64
The backbone coords in the form of: N-CA-C
Coordinates are not expected to be at any particular position.
ss_template : numpy nd.array, shape (M, 3), dtype=float64
The sidechain all-atom template. **Expected** to have the CA atom
        at the origin (0, 0, 0), and the first 3 atoms are N, CA, C.
        This requirement could easily be removed, but it is maintained
        for performance reasons, considering the context in which this
        function is meant to be used.
Returns
-------
nd.array, shape (M, 3), dtype=float64
The displaced side chain coords. All atoms are returned.
"""
# places bb with CA at 0,0,0
bbtmp = | np.full(bb_cnf.shape, np.nan, dtype=np.float32) | numpy.full |
import numpy as np
import xarray as xr
import pandas as pd
import multiprocessing as mp
class PreprocessData:
"""Class instantiation of PreprocessData:
Here we will be preprocessing data for deep learning model training.
Attributes:
working_directory (str): The path to the directory where the deep learning preprocessing files will be saved and worked from.
stormpatch_path (str): Where the storm patch files were saved.
climate (str): The climate period to derive deep learning data for. Options are ``current`` or ``future``.
threshold1 (int): The UH threshold to use. This value will delineate some form of ``severe`` and ``non-severe`` storm patches.
mask (boolean): Whether the threshold will be applied within the storm patch mask or within the full storm patch. Defaults to ``False``.
num_cpus (int): Number of CPUs to use in a node for parallelizing extractions. Defaults to 36 (Cheyenne compute nodes contain 36).
"""
def __init__(self, working_directory, stormpatch_path, climate, threshold1, mask=False, num_cpus=36):
# class attributes
self.working_directory=working_directory
self.stormpatch_path=stormpatch_path
# sanity check
if climate!='current' and climate!='future':
raise Exception("Please enter current or future for climate option.")
else:
self.climate=climate
# class attributes
self.threshold1=threshold1
# string help
self.mask=mask
if not self.mask:
self.mask_str='nomask'
if self.mask:
self.mask_str='mask'
# cpus for parallelizing
self.num_cpus=num_cpus
def generate_time_full(self):
"""Creation of the full time period that will be looped through for extracting storm patch information.
Only considering December-May months due to warm season bias over the central CONUS. The CONUS1 simulations
were run for 2000-2013.
Returns:
Pandas date range (DatetimeIndex).
"""
        dates = pd.date_range('2000-10-01', '2013-09-30', freq='MS')
        return dates[dates.month.isin([12, 1, 2, 3, 4, 5])]
def create_data_indices(self, time):
"""Split the loaded data into categories based on the UH threshold chosen and save the first intermediary files. Here we create
the indices of the storm patches that satisfy UH criteria for later use.
Args:
time (DatetimeIndex): Time object from pandas date range.
"""
if not self.mask:
data=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_assemble=xr.Dataset({'grid':(['x'], np.argwhere(data.uh25_grid.values.max(axis=(1,2)) > self.threshold1)[:,0])})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc")
if self.mask:
data=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_assemble=xr.Dataset({'grid':(['x'], np.argwhere(data.uh25_grid.where(data.mask).max(axis=(1,2), skipna=True).values > self.threshold1)[:,0])})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc")
def parallelizing_indxs(self):
"""Activate the multiprocessing function to parallelize the functions.
"""
print(f"Starting jobs...")
timearray=self.generate_time_full()
pool1=mp.Pool(self.num_cpus)
for time in timearray:
print(f"Extracting {time.strftime('%Y-%m')} indices...")
pool1.apply_async(self.create_data_indices, args=([time]))
pool1.close()
pool1.join()
print(f"Completed the jobs.")
def generate_time_month(self, month_int):
"""Creation of the time array that will be looped through for extracting storm patch information.
Args:
month_int (int): The month being used for the time array (2000-2013 years).
Returns:
Pandas date range (DatetimeIndex) for the respective month.
"""
return pd.date_range('2000-10-01','2013-09-30',freq='MS')[(pd.date_range('2000-10-01','2013-09-30',freq='MS').month==month_int)]
def apply_exceed_mask(self, data_var, data_mask, level):
"""Function to retain the patches that exceeded the threshold.
Args:
data_var (Xarray dataset): Dataset containing the variable's ``var_grid``.
data_mask (Xarray dataset): Dataset of indices (``grid``) for patches that exceeded the UH threshold.
level (int): The dataset level coordinate. This could be 0, 1, 2, or 3.
Returns:
Xarray data array of the variable for the storm patches that exceeded the UH threshold.
"""
return data_var.var_grid.sel(levels=level)[data_mask.grid.values,:,:]
def apply_notexceed_mask(self, data_var, data_mask, level):
"""Function to retain the patches that did not exceed the threshold.
Args:
data_var (Xarray dataset): Dataset containing the variable's ``var_grid``.
data_mask (Xarray dataset): Dataset of indices (``grid``) for patches that exceeded the UH threshold.
level (int): The dataset level coordinate. This could be 0, 1, 2, or 3.
Returns:
Numpy array of the variable for the storm patches that did not exceed the UH threshold.
"""
return np.delete(data_var.var_grid.sel(levels=level).values, data_mask.grid.values, axis=0)
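# For example (hypothetical sizes): if ``var_grid`` holds 100 patches at the
# selected level and ``data_mask.grid`` lists the 10 exceeding indices,
# np.delete returns the remaining 90 patches along the patch axis.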
def flatten_list(self, array):
"""Function to flatten the created list of Xarray data arrays.
Args:
array (list): The list of Xarray data arrays.
Returns:
Flattened list of Xarray data arrays.
"""
return [j for i in array for j in i.values]
def flatten_arraylist(self, array):
"""Function to flatten the created list of numpy arrays.
Args:
array (list): The list of numpy arrays.
Returns:
Flattened list of numpy arrays.
"""
return [j for i in array for j in i]
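# e.g. flatten_arraylist([np.zeros((2, 4, 4)), np.zeros((3, 4, 4))]) returns
# a list of five (4, 4) arrays, ready to be stacked with np.array(...).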
def month_translate(self, num):
"""Convert integer month to string month.
Args:
num (int): Input month.
Returns:
out (str): Input month as string.
Raises:
ValueError: If the month is not within the study's range (Dec-May).
"""
var={12:'December',
1:'January',
2:'February',
3:'March',
4:'April',
5:'May'}
try:
out=var[num]
return out
except KeyError:
raise ValueError("Please enter month integer from Dec-May.")
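# e.g. month_translate(12) returns 'December', while month_translate(6)
# raises ValueError because June lies outside the Dec-May study window.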
def run_months(self, months=np.array([12,1,2,3,4,5]), uh=True, nouh=True):
"""Function to automate and parallelize the creation of the exceedance/nonexceedance files.
Args:
months (int array): Months to iterate through.
uh (boolean): Whether to compute analysis for threshold exceedances. Defaults to ``True``.
nouh (boolean): Whether to compute analysis for threshold non-exceedances. Defaults to ``True``.
"""
pool2=mp.Pool(self.num_cpus)
for mo in months:
if uh:
print(f"Creating {self.month_translate(mo)} patches of threshold exceedances...")
pool2.apply_async(self.create_files_exceed_threshold, args=([mo]))
if nouh:
print(f"Creating {self.month_translate(mo)} patches of threshold non-exceedances...")
pool2.apply_async(self.create_files_notexceed_threshold, args=([mo]))
pool2.close()
pool2.join()
print(f"Completed the jobs.")
def create_files_exceed_threshold(self, month_int):
"""Create and save files containing the environment patches for storms that exceeded the threshold.
Data files being opened contain the storm patches, not the full CONUS WRF domain.
Args:
month_int (int): Month for analysis.
"""
time_temp=self.generate_time_month(month_int)
data_temp_sev_1=[]; data_temp_sev_3=[]; data_temp_sev_5=[]; data_temp_sev_7=[]; data_evwd_sev_1=[]; data_evwd_sev_3=[]
data_euwd_sev_1=[]; data_euwd_sev_3=[]; data_euwd_sev_5=[]; data_euwd_sev_7=[]; data_evwd_sev_5=[]; data_evwd_sev_7=[]
data_qvap_sev_1=[]; data_qvap_sev_3=[]; data_qvap_sev_5=[]; data_qvap_sev_7=[]; data_dbzs_sev_1=[]; data_maxw_sev_1=[]
data_pres_sev_1=[]; data_pres_sev_3=[]; data_pres_sev_5=[]; data_pres_sev_7=[]; data_ctts_sev_1=[]; data_mask_sev_1=[]
data_wwnd_sev_1=[]; data_wwnd_sev_3=[]; data_wwnd_sev_5=[]; data_wwnd_sev_7=[]; data_uh25_sev_1=[]; data_uh03_sev_1=[]
for time in time_temp:
print(f"opening files for {time.strftime('%Y')}{time.strftime('%m')}")
data_mask=xr.open_mfdataset(
f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc",
combine='by_coords')
data_temp=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_tk_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_evwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_ev_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_euwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_eu_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_qvap=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_qvapor_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_pres=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_p_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_wwnd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_w_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_maxw=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_maxw_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_gen =xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_temp_sev_1.append(self.apply_exceed_mask(data_temp, data_mask, 0))
data_temp_sev_3.append(self.apply_exceed_mask(data_temp, data_mask, 1))
data_temp_sev_5.append(self.apply_exceed_mask(data_temp, data_mask, 2))
data_temp_sev_7.append(self.apply_exceed_mask(data_temp, data_mask, 3))
data_evwd_sev_1.append(self.apply_exceed_mask(data_evwd, data_mask, 0))
data_evwd_sev_3.append(self.apply_exceed_mask(data_evwd, data_mask, 1))
data_evwd_sev_5.append(self.apply_exceed_mask(data_evwd, data_mask, 2))
data_evwd_sev_7.append(self.apply_exceed_mask(data_evwd, data_mask, 3))
data_euwd_sev_1.append(self.apply_exceed_mask(data_euwd, data_mask, 0))
data_euwd_sev_3.append(self.apply_exceed_mask(data_euwd, data_mask, 1))
data_euwd_sev_5.append(self.apply_exceed_mask(data_euwd, data_mask, 2))
data_euwd_sev_7.append(self.apply_exceed_mask(data_euwd, data_mask, 3))
data_qvap_sev_1.append(self.apply_exceed_mask(data_qvap, data_mask, 0))
data_qvap_sev_3.append(self.apply_exceed_mask(data_qvap, data_mask, 1))
data_qvap_sev_5.append(self.apply_exceed_mask(data_qvap, data_mask, 2))
data_qvap_sev_7.append(self.apply_exceed_mask(data_qvap, data_mask, 3))
data_pres_sev_1.append(self.apply_exceed_mask(data_pres, data_mask, 0))
data_pres_sev_3.append(self.apply_exceed_mask(data_pres, data_mask, 1))
data_pres_sev_5.append(self.apply_exceed_mask(data_pres, data_mask, 2))
data_pres_sev_7.append(self.apply_exceed_mask(data_pres, data_mask, 3))
data_wwnd_sev_1.append(self.apply_exceed_mask(data_wwnd, data_mask, 0))
data_wwnd_sev_3.append(self.apply_exceed_mask(data_wwnd, data_mask, 1))
data_wwnd_sev_5.append(self.apply_exceed_mask(data_wwnd, data_mask, 2))
data_wwnd_sev_7.append(self.apply_exceed_mask(data_wwnd, data_mask, 3))
data_maxw_sev_1.append(data_maxw.var_grid[data_mask.grid.values,:,:])
data_dbzs_sev_1.append(data_gen.dbz_grid[data_mask.grid.values,:,:])
data_ctts_sev_1.append(data_gen.ctt_grid[data_mask.grid.values,:,:])
data_uh25_sev_1.append(data_gen.uh25_grid[data_mask.grid.values,:,:])
data_uh03_sev_1.append(data_gen.uh03_grid[data_mask.grid.values,:,:])
data_mask_sev_1.append(data_gen.mask[data_mask.grid.values,:,:])
data_temp_sev_1_patches=self.flatten_list(data_temp_sev_1)
data_temp_sev_3_patches=self.flatten_list(data_temp_sev_3)
data_temp_sev_5_patches=self.flatten_list(data_temp_sev_5)
data_temp_sev_7_patches=self.flatten_list(data_temp_sev_7)
data_evwd_sev_1_patches=self.flatten_list(data_evwd_sev_1)
data_evwd_sev_3_patches=self.flatten_list(data_evwd_sev_3)
data_evwd_sev_5_patches=self.flatten_list(data_evwd_sev_5)
data_evwd_sev_7_patches=self.flatten_list(data_evwd_sev_7)
data_euwd_sev_1_patches=self.flatten_list(data_euwd_sev_1)
data_euwd_sev_3_patches=self.flatten_list(data_euwd_sev_3)
data_euwd_sev_5_patches=self.flatten_list(data_euwd_sev_5)
data_euwd_sev_7_patches=self.flatten_list(data_euwd_sev_7)
data_qvap_sev_1_patches=self.flatten_list(data_qvap_sev_1)
data_qvap_sev_3_patches=self.flatten_list(data_qvap_sev_3)
data_qvap_sev_5_patches=self.flatten_list(data_qvap_sev_5)
data_qvap_sev_7_patches=self.flatten_list(data_qvap_sev_7)
data_pres_sev_1_patches=self.flatten_list(data_pres_sev_1)
data_pres_sev_3_patches=self.flatten_list(data_pres_sev_3)
data_pres_sev_5_patches=self.flatten_list(data_pres_sev_5)
data_pres_sev_7_patches=self.flatten_list(data_pres_sev_7)
data_wwnd_sev_1_patches=self.flatten_list(data_wwnd_sev_1)
data_wwnd_sev_3_patches=self.flatten_list(data_wwnd_sev_3)
data_wwnd_sev_5_patches=self.flatten_list(data_wwnd_sev_5)
data_wwnd_sev_7_patches=self.flatten_list(data_wwnd_sev_7)
data_maxw_sev_1_patches=self.flatten_list(data_maxw_sev_1)
data_dbzs_sev_1_patches=self.flatten_list(data_dbzs_sev_1)
data_ctts_sev_1_patches=self.flatten_list(data_ctts_sev_1)
data_uh25_sev_1_patches=self.flatten_list(data_uh25_sev_1)
data_uh03_sev_1_patches=self.flatten_list(data_uh03_sev_1)
data_mask_sev_1_patches=self.flatten_list(data_mask_sev_1)
data_assemble=xr.Dataset({
'temp_sev_1':(['patch','y','x'], np.array(data_temp_sev_1_patches)), 'temp_sev_3':(['patch','y','x'], np.array(data_temp_sev_3_patches)),
'temp_sev_5':(['patch','y','x'], np.array(data_temp_sev_5_patches)), 'temp_sev_7':(['patch','y','x'], np.array(data_temp_sev_7_patches)),
'evwd_sev_1':(['patch','y','x'], np.array(data_evwd_sev_1_patches)), 'evwd_sev_3':(['patch','y','x'], np.array(data_evwd_sev_3_patches)),
'evwd_sev_5':(['patch','y','x'], np.array(data_evwd_sev_5_patches)), 'evwd_sev_7':(['patch','y','x'], np.array(data_evwd_sev_7_patches)),
'euwd_sev_1':(['patch','y','x'], np.array(data_euwd_sev_1_patches)), 'euwd_sev_3':(['patch','y','x'], np.array(data_euwd_sev_3_patches)),
'euwd_sev_5':(['patch','y','x'], np.array(data_euwd_sev_5_patches)), 'euwd_sev_7':(['patch','y','x'], np.array(data_euwd_sev_7_patches)),
'qvap_sev_1':(['patch','y','x'], np.array(data_qvap_sev_1_patches)), 'qvap_sev_3':(['patch','y','x'], np.array(data_qvap_sev_3_patches)),
'qvap_sev_5':(['patch','y','x'], np.array(data_qvap_sev_5_patches)), 'qvap_sev_7':(['patch','y','x'], np.array(data_qvap_sev_7_patches)),
'pres_sev_1':(['patch','y','x'], np.array(data_pres_sev_1_patches)), 'pres_sev_3':(['patch','y','x'], np.array(data_pres_sev_3_patches)),
'pres_sev_5':(['patch','y','x'], np.array(data_pres_sev_5_patches)), 'pres_sev_7':(['patch','y','x'], np.array(data_pres_sev_7_patches)),
'wwnd_sev_1':(['patch','y','x'], np.array(data_wwnd_sev_1_patches)), 'wwnd_sev_3':(['patch','y','x'], np.array(data_wwnd_sev_3_patches)),
'wwnd_sev_5':(['patch','y','x'], np.array(data_wwnd_sev_5_patches)), 'wwnd_sev_7':(['patch','y','x'], np.array(data_wwnd_sev_7_patches)),
'maxw_sev_1':(['patch','y','x'], np.array(data_maxw_sev_1_patches)), 'dbzs_sev_1':(['patch','y','x'], np.array(data_dbzs_sev_1_patches)),
'ctts_sev_1':(['patch','y','x'], np.array(data_ctts_sev_1_patches)), 'uh25_sev_1':(['patch','y','x'], np.array(data_uh25_sev_1_patches)),
'uh03_sev_1':(['patch','y','x'], np.array(data_uh03_sev_1_patches)), 'mask_sev_1':(['patch','y','x'], np.array(data_mask_sev_1_patches))})
data_assemble.to_netcdf(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_{time.strftime('%m')}.nc")
print(f"Exceedances for {time.strftime('%m')} complete...")
def create_files_notexceed_threshold(self, month_int):
"""Create files containing environment patches for storms that did not exceed the threshold.
Data files being opened contain the storm patches, not the full CONUS WRF domain.
Args:
month_int (int): Month for analysis.
"""
time_temp=self.generate_time_month(month_int)
data_temp_sev_1=[]; data_temp_sev_3=[]; data_temp_sev_5=[]; data_temp_sev_7=[]; data_evwd_sev_1=[]; data_evwd_sev_3=[]
data_euwd_sev_1=[]; data_euwd_sev_3=[]; data_euwd_sev_5=[]; data_euwd_sev_7=[]; data_evwd_sev_5=[]; data_evwd_sev_7=[]
data_qvap_sev_1=[]; data_qvap_sev_3=[]; data_qvap_sev_5=[]; data_qvap_sev_7=[]; data_dbzs_sev_1=[]; data_maxw_sev_1=[]
data_pres_sev_1=[]; data_pres_sev_3=[]; data_pres_sev_5=[]; data_pres_sev_7=[]; data_ctts_sev_1=[]; data_mask_sev_1=[]
data_wwnd_sev_1=[]; data_wwnd_sev_3=[]; data_wwnd_sev_5=[]; data_wwnd_sev_7=[]; data_uh25_sev_1=[]; data_uh03_sev_1=[]
for time in time_temp:
print(f"opening files for {time.strftime('%Y')}{time.strftime('%m')}")
data_mask=xr.open_mfdataset(
f"/{self.working_directory}/{self.climate}_indx{self.threshold1}_{self.mask_str}_{time.strftime('%Y')}{time.strftime('%m')}.nc",
combine='by_coords')
data_temp=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_tk_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_evwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_ev_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_euwd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_eu_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_qvap=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_qvapor_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_pres=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_p_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_wwnd=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_w_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_maxw=xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_maxw_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_gen =xr.open_mfdataset(f"/{self.stormpatch_path}/{self.climate}_SP3hourly_{time.strftime('%Y%m')}*.nc", combine='by_coords')
data_temp_sev_1.append(self.apply_notexceed_mask(data_temp, data_mask, 0))
data_temp_sev_3.append(self.apply_notexceed_mask(data_temp, data_mask, 1))
data_temp_sev_5.append(self.apply_notexceed_mask(data_temp, data_mask, 2))
data_temp_sev_7.append(self.apply_notexceed_mask(data_temp, data_mask, 3))
data_evwd_sev_1.append(self.apply_notexceed_mask(data_evwd, data_mask, 0))
data_evwd_sev_3.append(self.apply_notexceed_mask(data_evwd, data_mask, 1))
data_evwd_sev_5.append(self.apply_notexceed_mask(data_evwd, data_mask, 2))
data_evwd_sev_7.append(self.apply_notexceed_mask(data_evwd, data_mask, 3))
data_euwd_sev_1.append(self.apply_notexceed_mask(data_euwd, data_mask, 0))
data_euwd_sev_3.append(self.apply_notexceed_mask(data_euwd, data_mask, 1))
data_euwd_sev_5.append(self.apply_notexceed_mask(data_euwd, data_mask, 2))
data_euwd_sev_7.append(self.apply_notexceed_mask(data_euwd, data_mask, 3))
data_qvap_sev_1.append(self.apply_notexceed_mask(data_qvap, data_mask, 0))
data_qvap_sev_3.append(self.apply_notexceed_mask(data_qvap, data_mask, 1))
data_qvap_sev_5.append(self.apply_notexceed_mask(data_qvap, data_mask, 2))
data_qvap_sev_7.append(self.apply_notexceed_mask(data_qvap, data_mask, 3))
data_pres_sev_1.append(self.apply_notexceed_mask(data_pres, data_mask, 0))
data_pres_sev_3.append(self.apply_notexceed_mask(data_pres, data_mask, 1))
data_pres_sev_5.append(self.apply_notexceed_mask(data_pres, data_mask, 2))
data_pres_sev_7.append(self.apply_notexceed_mask(data_pres, data_mask, 3))
data_wwnd_sev_1.append(self.apply_notexceed_mask(data_wwnd, data_mask, 0))
data_wwnd_sev_3.append(self.apply_notexceed_mask(data_wwnd, data_mask, 1))
data_wwnd_sev_5.append(self.apply_notexceed_mask(data_wwnd, data_mask, 2))
data_wwnd_sev_7.append(self.apply_notexceed_mask(data_wwnd, data_mask, 3))
data_maxw_sev_1.append(np.delete(data_maxw.var_grid.values, data_mask.grid.values, axis=0))
data_dbzs_sev_1.append(np.delete(data_gen.dbz_grid.values, data_mask.grid.values, axis=0))
data_ctts_sev_1.append(np.delete(data_gen.ctt_grid.values, data_mask.grid.values, axis=0))
data_uh25_sev_1.append(np.delete(data_gen.uh25_grid.values, data_mask.grid.values, axis=0))
data_uh03_sev_1.append(np.delete(data_gen.uh03_grid.values, data_mask.grid.values, axis=0))
data_mask_sev_1.append(np.delete(data_gen.mask.values, data_mask.grid.values, axis=0))
data_temp_sev_1_patches=self.flatten_arraylist(data_temp_sev_1)
data_temp_sev_3_patches=self.flatten_arraylist(data_temp_sev_3)
data_temp_sev_5_patches=self.flatten_arraylist(data_temp_sev_5)
data_temp_sev_7_patches=self.flatten_arraylist(data_temp_sev_7)
data_evwd_sev_1_patches=self.flatten_arraylist(data_evwd_sev_1)
data_evwd_sev_3_patches=self.flatten_arraylist(data_evwd_sev_3)
data_evwd_sev_5_patches=self.flatten_arraylist(data_evwd_sev_5)
data_evwd_sev_7_patches=self.flatten_arraylist(data_evwd_sev_7)
data_euwd_sev_1_patches=self.flatten_arraylist(data_euwd_sev_1)
data_euwd_sev_3_patches=self.flatten_arraylist(data_euwd_sev_3)
data_euwd_sev_5_patches=self.flatten_arraylist(data_euwd_sev_5)
data_euwd_sev_7_patches=self.flatten_arraylist(data_euwd_sev_7)
data_qvap_sev_1_patches=self.flatten_arraylist(data_qvap_sev_1)
data_qvap_sev_3_patches=self.flatten_arraylist(data_qvap_sev_3)
data_qvap_sev_5_patches=self.flatten_arraylist(data_qvap_sev_5)
data_qvap_sev_7_patches=self.flatten_arraylist(data_qvap_sev_7)
data_pres_sev_1_patches=self.flatten_arraylist(data_pres_sev_1)
data_pres_sev_3_patches=self.flatten_arraylist(data_pres_sev_3)
data_pres_sev_5_patches=self.flatten_arraylist(data_pres_sev_5)
data_pres_sev_7_patches=self.flatten_arraylist(data_pres_sev_7)
data_wwnd_sev_1_patches=self.flatten_arraylist(data_wwnd_sev_1)
data_wwnd_sev_3_patches=self.flatten_arraylist(data_wwnd_sev_3)
data_wwnd_sev_5_patches=self.flatten_arraylist(data_wwnd_sev_5)
data_wwnd_sev_7_patches=self.flatten_arraylist(data_wwnd_sev_7)
data_maxw_sev_1_patches=self.flatten_arraylist(data_maxw_sev_1)
data_dbzs_sev_1_patches=self.flatten_arraylist(data_dbzs_sev_1)
data_ctts_sev_1_patches=self.flatten_arraylist(data_ctts_sev_1)
data_uh25_sev_1_patches=self.flatten_arraylist(data_uh25_sev_1)
data_uh03_sev_1_patches=self.flatten_arraylist(data_uh03_sev_1)
data_mask_sev_1_patches=self.flatten_arraylist(data_mask_sev_1)
data_assemble=xr.Dataset({
'temp_sev_1':(['patch','y','x'], np.array(data_temp_sev_1_patches)), 'temp_sev_3':(['patch','y','x'], np.array(data_temp_sev_3_patches)),
'temp_sev_5':(['patch','y','x'], np.array(data_temp_sev_5_patches)), 'temp_sev_7':(['patch','y','x'], np.array(data_temp_sev_7_patches)),
'evwd_sev_1':(['patch','y','x'], np.array(data_evwd_sev_1_patches)), 'evwd_sev_3':(['patch','y','x'], np.array(data_evwd_sev_3_patches)),
'evwd_sev_5':(['patch','y','x'], np.array(data_evwd_sev_5_patches)), 'evwd_sev_7':(['patch','y','x'], np.array(data_evwd_sev_7_patches)),
'euwd_sev_1':(['patch','y','x'], np.array(data_euwd_sev_1_patches)), 'euwd_sev_3':(['patch','y','x'], np.array(data_euwd_sev_3_patches)),
'euwd_sev_5':(['patch','y','x'], np.array(data_euwd_sev_5_patches)), 'euwd_sev_7':(['patch','y','x'], np.array(data_euwd_sev_7_patches)),
'qvap_sev_1':(['patch','y','x'], np.array(data_qvap_sev_1_patches)), 'qvap_sev_3':(['patch','y','x'], np.array(data_qvap_sev_3_patches)),
'qvap_sev_5':(['patch','y','x'], np.array(data_qvap_sev_5_patches)), 'qvap_sev_7':(['patch','y','x'], | np.array(data_qvap_sev_7_patches) | numpy.array |
from __future__ import print_function, division
import sys
import numpy as np
import csv
import os
import glob
import plotly
import plotly.graph_objs as go
def parse_stats(statsfn):
stats = {
'tp': [],
'fp': [],
'fn': [],
'num_input_sv': [],
'num_bp_away_from_sv_and_cents_and_telos': [],
'num_bp_away_from_cents_and_telos': [],
'numbp': [],
'num_sv_away_from_cents_and_telos': [],
'datasets': [],
}
labels = {}
with open(statsfn) as F:
reader = csv.DictReader(F, delimiter='\t')
for row in reader:
stats['tp'].append(float(row['num_bp_replaced_by_sv']))
stats['fp'].append(float(row['num_non_sv']))
stats['fn'].append(float(row['num_lone_sv']))
stats['num_sv_away_from_cents_and_telos'].append(float(row['num_bp_replaced_by_sv']) + float(row['num_lone_sv']))
stats['datasets'].append(row['dataset'])
for K in ('tp', 'fp', 'fn', 'num_input_sv', 'num_bp_away_from_sv_and_cents_and_telos', 'num_bp_away_from_cents_and_telos', 'num_sv_away_from_cents_and_telos'):
stats[K] = np.array(stats[K], dtype=float)
stats['numbp'] = stats['tp'] + stats['fp']
labels['numbp'] = stats['datasets']
stats['precision'] = stats['tp'] / (stats['tp'] + stats['fp'])
stats['recall'] = stats['tp'] / (stats['tp'] + stats['fn'])
labels['precision'] = labels['recall'] = stats['datasets']
oldlen = len(stats['precision'])
assert oldlen == len(stats['recall'])
notnan_idxs = np.logical_not(np.logical_or(np.isnan(stats['precision']), np.isnan(stats['recall'])))
stats['precision'] = stats['precision'][notnan_idxs]
stats['recall'] = stats['recall'][notnan_idxs]
assert len(stats['precision']) == len(stats['recall'])
#print(statsfn, 'has', oldlen - len(stats['precision']), 'nan')
stats['nonsv_ratio'] = (stats['fp'] + 1) / (stats['num_sv_away_from_cents_and_telos'] + 1)
assert np.count_nonzero(np.isnan(stats['nonsv_ratio'])) == 0
assert np.count_nonzero(stats['nonsv_ratio'] == 0) == 0
stats['nonsv_ratio'] = np.log2(stats['nonsv_ratio'])
labels['nonsv_ratio'] = ['%s (nonsv = %s, sv = %s)' % (stats['datasets'][idx], stats['fp'][idx], stats['num_sv_away_from_cents_and_telos'][idx]) for idx in range(len(stats['datasets']))]
return (stats, labels)
def scatter(traces, title, xtitle, ytitle, outfn, logx = False, xmin = None, xmax = None,):
xaxis = {
'title': xtitle,
'type': 'log' if logx else 'linear',
'range': [xmin, xmax],
}
layout = go.Layout(
title = title,
hovermode = 'closest',
xaxis = xaxis,
yaxis = {
'title': ytitle,
},
)
fig = go.Figure(data=traces, layout=layout)
plotly.offline.plot(fig, filename=outfn)
def cdf(arr, labels=None):
sorted_idxs = np.argsort(arr)
ret = [
arr[sorted_idxs],
np.linspace(0, 1, len(arr), endpoint=False),
]
if labels is not None:
ret.append([labels[idx] for idx in sorted_idxs])
return tuple(ret)
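# Illustrative example of cdf() (values assumed, not pipeline output):
#   cdf(np.array([3., 1., 2.]))
#   -> (array([1., 2., 3.]), array([0.        , 0.33333333, 0.66666667]))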
def plot_method_combos(statsfns):
xvals, yvals, xerrors, yerrors = [], [], [], []
runs = []
for statsfn in statsfns:
run = os.path.basename(statsfn).split('.')[1]
runs.append(run)
stats, _ = parse_stats(statsfn)
xvals.append(np.mean(stats['recall']))
yvals.append(np.mean(stats['precision']))
xerrors.append(np.std(stats['recall']))
yerrors.append( | np.std(stats['precision']) | numpy.std |
import gemmi
import numpy as np
import pytest
from pandas.testing import assert_index_equal
import reciprocalspaceship as rs
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("reset_index", [True, False])
@pytest.mark.parametrize("anomalous", [True, False])
def test_hkl_to_asu(mtz_by_spacegroup, inplace, reset_index, anomalous):
"""Test DataSet.hkl_to_asu() for common spacegroups"""
expected = rs.read_mtz(mtz_by_spacegroup)
p1 = rs.read_mtz(mtz_by_spacegroup[:-4] + "_p1.mtz")
p1.spacegroup = expected.spacegroup
# Add complex structure factors
p1["sf"] = p1.to_structurefactor("FMODEL", "PHIFMODEL")
expected["sf"] = expected.to_structurefactor("FMODEL", "PHIFMODEL")
if reset_index:
p1.reset_index(inplace=True)
result = p1.hkl_to_asu(inplace=inplace, anomalous=anomalous)
if reset_index:
result.set_index(["H", "K", "L"], inplace=True)
# Confirm inplace
if inplace:
assert id(result) == id(p1)
else:
assert id(result) != id(p1)
# Confirm centric reflections are always in +ASU
assert len(expected.centrics.index.difference(result.centrics.index)) == 0
assert len(result.centrics.index.difference(expected.centrics.index)) == 0
# If anomalous=True, acentric reflections were mapped to the Friedel-minus ASU.
# To test these reflections against `expected` we will map them back to the
# Friedel-plus ASU
# Note:
# - `result` no longer has a unique MultiIndex after this
if anomalous:
result.reset_index(inplace=True)
acentric = ~result.label_centrics()["CENTRIC"]
friedel_minus = result["M/ISYM"] % 2 == 0
result[friedel_minus & acentric] = result[friedel_minus & acentric].apply_symop(
"-x,-y,-z"
)
result.set_index(["H", "K", "L"], inplace=True)
assert len(result.index.difference(expected.index)) == 0
assert len(expected.index.difference(result.index)) == 0
# Confirm structure factor amplitudes are always unchanged
assert np.allclose(
expected.loc[result.index, "FMODEL"].to_numpy(), result["FMODEL"].to_numpy()
)
# Confirm phase changes are applied by comparing complex structure factors
expected_sf = expected.loc[result.index].to_structurefactor("FMODEL", "PHIFMODEL")
result_sf = result.to_structurefactor("FMODEL", "PHIFMODEL")
assert np.allclose(result_sf, expected_sf)
# Confirm phase changes were applied to complex structure factors in DataSet
assert np.allclose(np.abs(result["sf"]), np.abs(expected.loc[result.index, "sf"]))
assert np.allclose(result["sf"], expected.loc[result.index, "sf"])
def test_hklmapping_roundtrip_phase(mtz_by_spacegroup):
"""
Test that a roundtrip of DataSet.hkl_to_asu() and DataSet.hkl_to_observed()
preserves phases
"""
ref = rs.read_mtz(mtz_by_spacegroup)
expected = rs.read_mtz(mtz_by_spacegroup[:-4] + "_p1.mtz")
expected["sf"] = expected.to_structurefactor("FMODEL", "PHIFMODEL")
expected.spacegroup = ref.spacegroup
# Roundtrip
temp = expected.hkl_to_asu()
result = temp.hkl_to_observed()
# Check indices
assert_index_equal(result.index, expected.index)
# Confirm phase changes are applied by comparing as complex structure factors
expected_sf = expected.loc[result.index].to_structurefactor("FMODEL", "PHIFMODEL")
result_sf = result.to_structurefactor("FMODEL", "PHIFMODEL")
assert np.allclose(np.abs(result_sf), | np.abs(expected_sf) | numpy.abs |
"""
Plot comparisons to understand contributions of SIC dependent on the
thickness initial conditions for the regional experiments.
Notes
-----
Author : <NAME>
Date : 9 October 2018
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import nclcmaps as ncm
import datetime
import read_MonthlyOutput as MO
import calc_Utilities as UT
import cmocean
### Define directories
directorydata = '/surtsey/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/'
#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting SIC contributions - %s----' % titletime)
### Allot time series
year1 = 1900
year2 = 2000
years = np.arange(year1,year2+1,1)
### Call arguments
#varnames = ['Z500','Z30','SLP','T2M','U10','U300','SWE','THICK','P','EGR',
# 'RNET']
varnames = ['Z500']
runnames = [r'CIT',r'FPOL',r'FSUB']
experiments = [r'\textbf{$\Delta$POLAR}',r'\textbf{$\Delta$SUBPOLAR}',r'\textbf{difference}']
period = 'DJF'
for v in range(len(varnames)):
### Call function for surface temperature data from reach run
lat,lon,time,lev,tascit = MO.readExperi(directorydata,
'%s' % varnames[v],'CIT','surface')
lat,lon,time,lev,tasfic = MO.readExperi(directorydata,
'%s' % varnames[v],'FPOL','surface')
lat,lon,time,lev,tasfict = MO.readExperi(directorydata,
'%s' % varnames[v],'FSUB','surface')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### Concatonate runs
runs = [tascit,tasfic,tasfict]
### Separate per periods (ON,DJ,FM)
if period == 'ON':
tas_mo = np.empty((len(runs),tascit.shape[0],tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = np.nanmean(runs[i][:,9:11,:,:],axis=1)
elif period == 'DJ':
tas_mo = np.empty((len(runs),tascit.shape[0]-1,tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i],tas_mo[i] = UT.calcDecJan(runs[i],runs[i],lat,
lon,'surface',1)
elif period == 'FM':
tas_mo= np.empty((len(runs),tascit.shape[0],tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = np.nanmean(runs[i][:,1:3,:,:],axis=1)
elif period == 'DJF':
tas_mo= np.empty((len(runs),tascit.shape[0]-1,tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i],tas_mo[i] = UT.calcDecJanFeb(runs[i],runs[i],lat,
lon,'surface',1)
elif period == 'M':
tas_mo= np.empty((len(runs),tascit.shape[0],tascit.shape[2],tascit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = runs[i][:,2,:,:]
else:
raise ValueError('Wrong period selected! (ON,DJ,FM,DJF,M)')
### Select the ensemble members for each experiment
tas_mocit = tas_mo[0][:,:,:]
tas_mofic = tas_mo[1][:,:,:]
tas_mofict = tas_mo[2][:,:,:]
### Compute comparisons for months - taken ensemble average
ficcit = np.nanmean(tas_mofic - tas_mocit,axis=0)
fictfit = np.nanmean(tas_mofict - tas_mocit,axis=0)
difference = ficcit - fictfit
diffruns_mo = [ficcit,fictfit,difference]
### Calculate statistical significance
stat_FICCIT,pvalue_FICCIT = UT.calc_indttest(tas_mofic,tas_mocit)
stat_FICTFIT,pvalue_FICTFIT = UT.calc_indttest(tas_mofict,tas_mocit)
stat_difference,pvalue_difference = UT.calc_indttest(tas_mofic - tas_mocit,
tas_mofict - tas_mocit)
pruns_mo = [pvalue_FICCIT,pvalue_FICTFIT,pvalue_difference]
###########################################################################
###########################################################################
###########################################################################
#### Plot T2M
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure()
for i in range(len(experiments)):
var = diffruns_mo[i]
pvar = pruns_mo[i]
### Set limits for contours and colorbars
if varnames[v] == 'T2M':
limit = np.arange(-10,10.1,0.5)
barlim = np.arange(-10,11,5)
elif varnames[v] == 'Z500':
limit = np.arange(-60,60.1,1)
barlim = np.arange(-60,61,30)
elif varnames[v] == 'Z30':
limit = np.arange(-100,100.1,5)
barlim = | np.arange(-100,101,50) | numpy.arange |
#!/usr/bin/env python
"""
EM algorithm for learning rhyming words and rhyme schemes with independent stanzas.
Original implementation: <NAME> (<EMAIL>), 2011.
"""
from __future__ import division, print_function, unicode_literals
import argparse
import json
import logging
import os
import sys
from collections import defaultdict, OrderedDict
from difflib import SequenceMatcher
import numpy
from rhymediscovery import celex
def load_stanzas(stanzas_file):
"""
Load stanzas from gold standard file
"""
f = stanzas_file.readlines()
stanzas = []
for i, line in enumerate(f):
if i % 4 == 0:
stanza_words = line.strip().split()[1:]
stanzas.append(Stanza(stanza_words))
return stanzas
class Stanza:
def __init__(self, stanza_words):
self.words = tuple(stanza_words) # Sequence of final words
self.word_indices = None # Indices of words with respect to global wordlist
def set_word_indices(self, wordlist):
"""
Populate the list of word_indices, mapping self.words to the given wordlist
"""
self.word_indices = [wordlist.index(word) for word in self.words]
def __str__(self):
return ' '.join(self.words)
def __len__(self):
return len(self.words)
class Schemes:
"""
Stores schemes loaded from schemes.json
"""
def __init__(self, scheme_file):
self.scheme_file = scheme_file
self.scheme_list, self.scheme_dict = self._parse_scheme_file()
self.num_schemes = len(self.scheme_list)
def _parse_scheme_file(self):
"""
Initialize redundant data structures for lookup optimization
"""
schemes = json.loads(self.scheme_file.read(), object_pairs_hook=OrderedDict)
scheme_list = []
scheme_dict = defaultdict(list)
for scheme_len, scheme_group in schemes.items():
for scheme_str, _count in scheme_group:
scheme_code = tuple(int(c) for c in scheme_str.split(' '))
scheme_list.append(scheme_code)
scheme_dict[int(scheme_len)].append(len(scheme_list) - 1)
return scheme_list, scheme_dict
def get_schemes_for_len(self, n):
"""
Returns the indices of all n-length schemes in self.scheme_list
"""
return self.scheme_dict[n]
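# For example, a 4-line stanza calls schemes.get_schemes_for_len(4) and gets
# the indices into scheme_list of all 4-element codes such as (1, 2, 1, 2),
# assuming that scheme appears in schemes.json.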
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
return sorted(list(set().union(*[stanza.words for stanza in stanzas])))
def get_rhymelists(stanza, scheme):
"""
Returns ordered lists of the stanza's word indices as defined by given scheme
"""
rhymelists = defaultdict(list)
for rhyme_group, word_index in zip(scheme, stanza.word_indices):
rhymelists[rhyme_group].append(word_index)
return list(rhymelists.values())
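# For example, scheme (1, 2, 1, 2) applied to word_indices [5, 7, 9, 11]
# yields [[5, 9], [7, 11]]: one ordered list of word indices per rhyme group.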
def init_distance_ttable(wordlist, distance_function):
"""
Initialize pair-wise rhyme strengths according to the given word distance function
"""
n = len(wordlist)
t_table = numpy.zeros((n, n + 1))
# Initialize P(c|r) accordingly
for r, w in enumerate(wordlist):
for c, v in enumerate(wordlist):
if c < r:
t_table[r, c] = t_table[c, r] # Similarity is symmetric
else:
t_table[r, c] = distance_function(w, v) + 0.001 # For backoff
t_table[:, n] = numpy.mean(t_table[:, :-1], axis=1)
# Normalize
t_totals = numpy.sum(t_table, axis=0)
for i, t_total in enumerate(t_totals.tolist()):
t_table[:, i] /= t_total
return t_table
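# Minimal usage sketch (toy distance function, illustrative only):
#   t = init_distance_ttable(['cat', 'hat'], lambda w, v: float(w[-2:] == v[-2:]))
#   assert numpy.allclose(t.sum(axis=0), 1.0)  # every column is normalized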
def init_uniform_ttable(wordlist):
"""
Initialize (normalized) theta uniformly
"""
n = len(wordlist)
return | numpy.ones((n, n + 1)) | numpy.ones |
import scipy.io as sio
import numpy as np
import pickle
import torch
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch.nn as nn
from model.CNN import CNN
from utils.DataLoader import ECGDataset, ecg_collate_func
import sys
import argparse
import os
data_dirc = 'data/'
RAW_LABELS = np.load(data_dirc+'raw_labels.npy')
PERMUTATION = | np.load(data_dirc+'random_permutation.npy') | numpy.load |
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import streakline
#import streakline2
import myutils
import ffwd
from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior
#import streams
import astropy
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
import astropy.coordinates as coord
import gala.coordinates as gc
import scipy.linalg as la
import scipy.integrate
import scipy.interpolate
import scipy.optimize
import scipy.spatial.distance
import zscale
import itertools
import copy
import pickle
# observers
# defaults taken as in astropy v2.0 icrs
mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
MASK = -9999
pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
#pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
class Stream():
def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'):
"""Initialize """
setup = {}
if progenitor['coords']=='galactocentric':
setup['x0'] = x0
setup['v0'] = v0
elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0):
if progenitor['pm_polar']:
a = v0[1].value
phi = v0[2].value
v0[1] = a*np.sin(phi)*u.mas/u.yr
v0[2] = a*np.cos(phi)*u.mas/u.yr
# convert positions
xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer'])
xgal = xeq.transform_to(coord.Galactocentric)
setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc
# convert velocities
setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun)
#setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s
else:
raise ValueError('Observer position needed!')
setup['dr'] = dr
setup['dv'] = dv
setup['minit'] = minit
setup['mfinal'] = mfinal
setup['rcl'] = rcl
setup['dt'] = dt
setup['age'] = age
setup['nstars'] = nstars
setup['integrator'] = integrator
setup['potential'] = potential
setup['pparams'] = pparams
self.setup = setup
self.setup_aux = {}
self.fill_intid()
self.fill_potid()
self.st_params = self.format_input()
def fill_intid(self):
"""Assign integrator ID for a given integrator choice
Assumes setup dictionary has an 'integrator' key"""
if self.setup['integrator']=='lf':
self.setup_aux['iaux'] = 0
elif self.setup['integrator']=='rk':
self.setup_aux['iaux'] = 1
def fill_potid(self):
"""Assign potential ID for a given potential choice
Assumes d has a 'potential' key"""
if self.setup['potential']=='nfw':
self.setup_aux['paux'] = 3
elif self.setup['potential']=='log':
self.setup_aux['paux'] = 2
elif self.setup['potential']=='point':
self.setup_aux['paux'] = 0
elif self.setup['potential']=='gal':
self.setup_aux['paux'] = 4
elif self.setup['potential']=='lmc':
self.setup_aux['paux'] = 6
elif self.setup['potential']=='dipole':
self.setup_aux['paux'] = 8
elif self.setup['potential']=='quad':
self.setup_aux['paux'] = 9
elif self.setup['potential']=='octu':
self.setup_aux['paux'] = 10
def format_input(self):
"""Format input parameters for streakline.stream"""
p = [None]*12
# progenitor position
p[0] = self.setup['x0'].si.value
p[1] = self.setup['v0'].si.value
# potential parameters
p[2] = [x.si.value for x in self.setup['pparams']]
# stream smoothing offsets
p[3] = [self.setup['dr'], self.setup['dv'].si.value]
# potential and integrator choice
p[4] = self.setup_aux['paux']
p[5] = self.setup_aux['iaux']
# number of steps and stream stars
p[6] = int(self.setup['age']/self.setup['dt'])
p[7] = int(p[6]/self.setup['nstars'])
# cluster properties
p[8] = self.setup['minit'].si.value
p[9] = self.setup['mfinal'].si.value
p[10] = self.setup['rcl'].si.value
# time step
p[11] = self.setup['dt'].si.value
return p
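# Layout of the returned parameter list: p[0:2] progenitor position/velocity,
# p[2] potential parameters, p[3] smoothing offsets, p[4:6] potential and
# integrator IDs, p[6:8] step counts, p[8:11] initial mass, final mass and
# cluster radius, and p[11] the time step.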
def generate(self):
"""Create streakline model for a stream of set parameters"""
#xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p)
stream = streakline.stream(*self.st_params)
self.leading = {}
self.leading['x'] = stream[:3]*u.m
self.leading['v'] = stream[6:9]*u.m/u.s
self.trailing = {}
self.trailing['x'] = stream[3:6]*u.m
self.trailing['v'] = stream[9:12]*u.m/u.s
def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None):
"""Observe the stream
stream.obs holds all observations
stream.err holds all errors"""
x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc
v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s
if mode=='cartesian':
# returns coordinates in following order
# x(x, y, z), v(vx, vy, vz)
if len(units)<2:
units.append(self.trailing['x'].unit)
units.append(self.trailing['v'].unit)
if len(errors)<2:
errors.append(0.2*u.kpc)
errors.append(2*u.km/u.s)
# positions
x = x.to(units[0])
ex = np.ones(np.shape(x))*errors[0]
ex = ex.to(units[0])
# velocities
v = v.to(units[1])
ev = np.ones(np.shape(v))*errors[1]
ev = ev.to(units[1])
self.obs = np.concatenate([x,v]).value
self.err = np.concatenate([ex,ev]).value
elif mode=='equatorial':
# assumes coordinates in the following order:
# ra, dec, distance, vrad, mualpha, mudelta
if len(units)!=6:
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
if len(errors)!=6:
errors = [0.2*u.deg, 0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr]
# define reference frame
xgal = coord.Galactocentric(x, **observer)
#frame = coord.Galactocentric(**observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, v, **vobs)
# store coordinates
ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
obs = np.hstack([ra, dec, dist, vr, mua, mud]).value
obs = np.reshape(obs,(6,-1))
if footprint=='sdss':
infoot = dec > -2.5*u.deg
obs = obs[:,infoot]
if rotmatrix is not None and not np.allclose(rotmatrix, np.eye(3)):
xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix)
obs[0] = xi
obs[1] = eta
self.obs = obs
# store errors
err = np.ones(np.shape(self.obs))
if logerr:
for i in range(6):
err[i] *= np.exp(errors[i].to(units[i]).value)
else:
for i in range(6):
err[i] *= errors[i].to(units[i]).value
self.err = err
self.obsunit = units
self.obserror = errors
# randomly select nstars from the stream
if nstars>-1:
if sequential:
select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int)
else:
select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars)
self.obs = self.obs[:,select]
self.err = self.err[:,select]
# include only designated dimensions
if len(present)>0:
self.obs = self.obs[present]
self.err = self.err[present]
self.obsunit = [ self.obsunit[x] for x in present ]
self.obserror = [ self.obserror[x] for x in present ]
def prog_orbit(self):
"""Generate progenitor orbital history"""
orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1)
self.orbit = {}
self.orbit['x'] = orbit[:3]*u.m
self.orbit['v'] = orbit[3:]*u.m/u.s
def project(self, name, N=1000, nbatch=-1):
"""Project the stream from observed to native coordinates"""
poly = np.loadtxt("../data/{0:s}_all.txt".format(name))
self.streak = np.poly1d(poly)
self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N)
self.streak_y = np.polyval(self.streak, self.streak_x)
self.streak_b = np.zeros(N)
self.streak_l = np.zeros(N)
pdot = np.polyder(poly)
for i in range(N):
length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,))
self.streak_l[i] = length[0]
XB = np.transpose(np.vstack([self.streak_x, self.streak_y]))
n = np.shape(self.obs)[1]
if nbatch<0:
nstep = 0
nbatch = -1
else:
nstep = int(n / nbatch)
i1 = 0
i2 = nbatch
for i in range(nstep):
XA = np.transpose(np.vstack([np.array(self.obs[0][i1:i2]), np.array(self.obs[1][i1:i2])]))
self.emdist(XA, XB, i1=i1, i2=i2)
i1 += nbatch
i2 += nbatch
XA = np.transpose(np.vstack([np.array(self.catalog['ra'][i1:]), np.array(self.catalog['dec'][i1:])]))
self.emdist(XA, XB, i1=i1, i2=n)
#self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header')
def emdist(self, XA, XB, i1=0, i2=-1):
""""""
distances = scipy.spatial.distance.cdist(XA, XB)
self.catalog['b'][i1:i2] = np.min(distances, axis=1)
imin = np.argmin(distances, axis=1)
self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[imin]] *= -1
self.catalog['l'][i1:i2] = self.streak_l[imin]
def _delta_path(self, x, pdot):
"""Return integrand for calculating length of a path along a polynomial"""
return np.sqrt(1 + np.polyval(pdot, x)**2)
def plot(self, mode='native', fig=None, color='k', **kwargs):
"""Plot stream"""
# Plotting
if fig==None:
plt.close()
plt.figure()
ax = plt.axes([0.12,0.1,0.8,0.8])
if mode=='native':
# Color setup
cindices = np.arange(self.setup['nstars']) # colors of stream particles
nor = mpl.colors.Normalize(vmin=0, vmax=self.setup['nstars']) # colormap normalization
plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3)
plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
plt.xlabel("X (kpc)")
plt.ylabel("Z (kpc)")
elif mode=='observed':
plt.subplot(221)
plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Dec")
plt.subplot(223)
plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs)
plt.xlabel("RA")
plt.ylabel("Distance")
plt.subplot(222)
plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\\alpha$")
plt.subplot(224)
plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs)
plt.xlabel("V$_r$")
plt.ylabel("$\mu\delta$")
plt.tight_layout()
#plt.minorticks_on()
def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}):
"""Read stream star positions from a file"""
t = np.loadtxt(fname).T
n = np.shape(t)[1]
ns = int((n-1)/2)
self.setup['nstars'] = ns
# progenitor
self.setup['x0'] = t[:3,0] * units['x']
self.setup['v0'] = t[3:,0] * units['v']
# leading tail
self.leading = {}
self.leading['x'] = t[:3,1:ns+1] * units['x']
self.leading['v'] = t[3:,1:ns+1] * units['v']
# trailing tail
self.trailing = {}
self.trailing['x'] = t[:3,ns+1:] * units['x']
self.trailing['v'] = t[3:,ns+1:] * units['v']
def save(self, fname):
"""Save stream star positions to a file"""
# define table
t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
# add progenitor info
t.add_row(np.ravel([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value]))
# add leading tail infoobsmode
tt = Table(np.concatenate((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vstack([t,tt])
# add trailing tail info
tt = Table(np.concatenate((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
t = astropy.table.vstack([t,tt])
# save to file
t.write(fname, format='ascii.commented_header')
# make a streakline model of a stream
def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
"""Create a streakline model of a stream
baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
# vary progenitor parameters
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
for i in range(3):
mock['x0'][i] += pparams0[26+i]
mock['v0'][i] += pparams0[29+i]
# vary potential parameters
potential = 'octu'
pparams = pparams0[:26]
#print(pparams[0])
pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
pparams[2] = (10**pparams0[2].value)*pparams0[2].unit
#pparams[0] = pparams0[0]*1e15
#pparams[2] = pparams0[2]*1e15
#print(pparams[0])
# adjust circular velocity in this halo
vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
# create a model stream with these parameters
params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
stream = Stream(**params['generate'])
stream.generate()
stream.observe(**params['observe'])
################################
# Plot observed stream and model
if graph:
observed = load_stream(name)
Ndim = np.shape(observed.obs)[0]
modcol = 'k'
obscol = 'orange'
ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)']
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(12,4))
for i in range(3):
plt.sca(ax[i])
plt.gca().invert_xaxis()
plt.xlabel('R.A. (deg)')
plt.ylabel(ylabel[i])
plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream')
plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model')
if i==0:
plt.legend(frameon=False, handlelength=0.5, fontsize='small')
plt.tight_layout()
if graphsave:
plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150)
return stream
def progenitor_params(n):
"""Return progenitor parameters for a given stream"""
if n==-1:
age = 1.6*u.Gyr
mi = 1e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = gd1_coordinates(observer=mw_observer)
elif n==-2:
age = 2.7*u.Gyr
mi = 1e5*u.Msun
mf = 2e4*u.Msun
x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0)
elif n==-3:
age = 3.5*u.Gyr
mi = 5e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = tri_coordinates(observer=mw_observer)
elif n==-4:
age = 2*u.Gyr
mi = 2e4*u.Msun
mf = 2e-1*u.Msun
x0, v0 = atlas_coordinates(observer=mw_observer)
out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf}
return out
def gal2eq(x, v, observer=mw_observer, vobs=vsun0):
""""""
# define reference frame
xgal = coord.Galactocentric(np.array(x)[:,np.newaxis]*u.kpc, **observer)
# convert
xeq = xgal.transform_to(coord.ICRS)
veq = gc.vgal_to_hel(xeq, np.array(v)[:,np.newaxis]*u.km/u.s, **vobs)
# store coordinates
units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
return(xobs, vobs)
def gd1_coordinates(observer=mw_observer):
"""Approximate GD-1 progenitor coordinates"""
x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-90, -250, -120]
return (x0, v0)
def pal5_coordinates(observer=mw_observer, vobs=vsun0):
"""Pal5 coordinates"""
# sdss
ra = 229.0128*u.deg
dec = -0.1082*u.deg
# bob's rrlyrae
d = 21.7*u.kpc
# harris
#d = 23.2*u.kpc
# odenkirchen 2002
vr = -58.7*u.km/u.s
# fritz & kallivayalil 2015
mua = -2.296*u.mas/u.yr
mud = -2.257*u.mas/u.yr
d = 24*u.kpc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer)
x0 = x.galactocentric
v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s)
return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist())
def tri_coordinates(observer=mw_observer):
"""Approximate Triangulum progenitor coordinates"""
x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [-40, 155, 155]
return (x0, v0)
def atlas_coordinates(observer=mw_observer):
"""Approximate ATLAS progenitor coordinates"""
x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer)
x_ = x.galactocentric
x0 = [x_.x.value, x_.y.value, x_.z.value]
v0 = [40, 150, -120]
return (x0, v0)
# great circle orientation
def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True):
"""Save rotation matrix for a stream model"""
if stream==None:
stream = stream_model(name, pparams0=pparams, dt=dt)
# find the pole
ra = np.radians(stream.obs[0])
dec = np.radians(stream.obs[1])
rx = np.cos(ra) * np.cos(dec)
ry = np.sin(ra) * np.cos(dec)
rz = np.sin(dec)
r = np.column_stack((rx, ry, rz))
# fit the plane
x0 = np.array([0, 1, 0])
lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,))
x0 = lsq.x/np.linalg.norm(lsq.x)
ra0 = np.arctan2(x0[1], x0[0])
dec0 = np.arcsin(x0[2])
ra0 += np.pi
dec0 = np.pi/2 - dec0
# euler rotations
R0 = myutils.rotmatrix(np.degrees(-ra0), 2)
R1 = myutils.rotmatrix(np.degrees(dec0), 1)
R2 = myutils.rotmatrix(0, 2)
R = np.dot(R2, np.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
# put xi = 50 at the beginning of the stream
xi[xi>180] -= 360
xi += 360
xi0 = np.min(xi) - 50
R2 = myutils.rotmatrix(-xi0, 2)
R = np.dot(R2, np.matmul(R1, R0))
xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
if save:
np.save('../data/rotmatrix_{}'.format(name), R)
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
mock['rotmatrix'] = R
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
if graph:
plt.close()
fig, ax = plt.subplots(1,2,figsize=(10,5))
plt.sca(ax[0])
plt.plot(stream.obs[0], stream.obs[1], 'ko')
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
plt.sca(ax[1])
plt.plot(xi, eta, 'ko')
plt.xlabel('$\\xi$ (deg)')
plt.ylabel('$\\eta$ (deg)')
plt.ylim(-5, 5)
plt.tight_layout()
plt.savefig('../plots/gc_orientation_{}.png'.format(name))
return R
def wfit_plane(x, r, p=None):
"""Fit a plane to a set of 3d points"""
Np = np.shape(r)[0]
if p is None:
p = np.ones(Np)
Q = np.zeros((3,3))
for i in range(Np):
Q += p[i]**2 * np.outer(r[i], r[i])
x = x/np.linalg.norm(x)
lsq = np.inner(x, np.inner(Q, x))
return lsq
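# Note: minimizing x^T Q x over unit vectors x is equivalent to taking the
# eigenvector of Q with the smallest eigenvalue; a quick cross-check (not used
# by the pipeline) would be:
#   evals, evecs = np.linalg.eigh(Q); normal = evecs[:, 0]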
# observed streams
#def load_stream(n):
#"""Load stream observations"""
#if n==-1:
#observed = load_gd1(present=[0,1,2,3])
#elif n==-2:
#observed = load_pal5(present=[0,1,2,3])
#elif n==-3:
#observed = load_tri(present=[0,1,2,3])
#elif n==-4:
#observed = load_atlas(present=[0,1,2,3])
#return observed
def endpoints(name):
""""""
stream = load_stream(name)
# find endpoints
amin = np.argmin(stream.obs[0])
amax = np.argmax(stream.obs[0])
ra = np.array([stream.obs[0][i] for i in [amin, amax]])
dec = np.array([stream.obs[1][i] for i in [amin, amax]])
f = open('../data/mock_{}.params'.format(name), 'rb')
mock = pickle.load(f)
# rotate endpoints
R = mock['rotmatrix']
xi, eta = myutils.rotate_angles(ra, dec, R)
#xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
mock['ra_range'] = ra
mock['xi_range'] = xi #np.percentile(xi, [10,90])
f.close()
f = open('../data/mock_{}.params'.format(name), 'wb')
pickle.dump(mock, f)
f.close()
def load_pal5(present, nobs=50, potential='gal'):
""""""
if len(present)==2:
t = Table.read('../data/pal5_members.txt', format='ascii.commented_header')
dist = 21.7
deltadist = 0.7
np.random.seed(34)
t = t[np.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = np.random.randn(nobs)*deltadist + dist
obs = np.array([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.repeat( np.array([2e-4, 2e-4, 0.7]), nobs ).reshape(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==3:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_gd1(present, nobs=50, potential='gal'):
""""""
if len(present)==3:
t = Table.read('../data/gd1_members.txt', format='ascii.commented_header')
dist = 0
deltadist = 0.5
np.random.seed(34)
t = t[np.random.randint(0, high=len(t), size=nobs)]
nobs = len(t)
d = np.random.randn(nobs)*deltadist + dist
d += t['l']*0.04836 + 9.86
obs = np.array([t['ra'], t['dec'], d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.repeat( np.array([2e-4, 2e-4, 0.5]), nobs ).reshape(3, -1)
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
if len(present)==4:
#t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header')
t = Table.read('../data/gd1_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
ind = np.all(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs#[np.array(present)]
observed.obsunit = obsunit
observed.err = err#[np.array(present)]
observed.obserror = obserr
return observed
def load_tri(present, nobs=50, potential='gal'):
""""""
if len(present)==4:
t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
if len(present)==3:
t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
obs = np.array([t['ra'], t['dec'], t['d']])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
ind = np.all(obs!=MASK, axis=0)
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def load_atlas(present, nobs=50, potential='gal'):
""""""
ra, dec = atlas_track()
n = np.size(ra)
d = np.random.randn(n)*2 + 20
obs = np.array([ra, dec, d])
obsunit = [u.deg, u.deg, u.kpc]
err = np.array([np.ones(n)*0.05, np.ones(n)*0.05, np.ones(n)*2])
obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
observed = Stream(potential=potential)
observed.obs = obs
observed.obsunit = obsunit
observed.err = err
observed.obserror = obserr
return observed
def atlas_track():
""""""
ra0, dec0 = np.radians(77.16), np.radians(46.92 - 90)
# euler rotations
D = np.array([[np.cos(ra0), np.sin(ra0), 0], [-np.sin(ra0), np.cos(ra0), 0], [0, 0, 1]])
C = np.array([[np.cos(dec0), 0, np.sin(dec0)], [0, 1, 0], [-np.sin(dec0), 0, np.cos(dec0)]])
B = np.diag(np.ones(3))
R = np.dot(B, np.dot(C, D))
Rinv = np.linalg.inv(R)
l0 = np.linspace(0, 2*np.pi, 500)
b0 = np.zeros(500)
xeq, yeq, zeq = myutils.eq2car(l0, b0)
eq = np.column_stack((xeq, yeq, zeq))
eq_rot = np.zeros(np.shape(eq))
for i in range(np.size(l0)):
eq_rot[i] = np.dot(Rinv, eq[i])
l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2])
ra_s, dec_s = np.degrees(l0_rot), np.degrees(b0_rot)
ind_s = (ra_s>17) & (ra_s<30)
ra_s = ra_s[ind_s]
dec_s = dec_s[ind_s]
return (ra_s, dec_s)
def fancy_name(n):
"""Return nicely formatted stream name"""
names = {-1: 'GD-1', -2: 'Palomar 5', -3: 'Triangulum', -4: 'ATLAS'}
return names[n]
# model parameters
def get_varied_pars(vary):
"""Return indices and steps for a preset of varied parameters, and a label for varied parameters
Parameters:
vary - string setting the parameter combination to be varied, options: 'potential', 'progenitor', 'halo', or a list thereof"""
if type(vary) is not list:
vary = [vary]
Nt = len(vary)
vlabel = '_'.join(vary)
pid = []
dp = []
for v in vary:
o1, o2 = get_varied_bytype(v)
pid += o1
dp += o2
return (pid, dp, vlabel)
def get_varied_bytype(vary):
"""Get varied parameter of a particular type"""
if vary=='potential':
pid = [5,6,8,10,11]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun]
elif vary=='bary':
pid = [0,1,2,3,4]
# gd1
dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc]
## atlas & triangulum
#dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc]
# pal5
dp = [1e-2*u.Msun, 0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc]
dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
elif vary=='halo':
pid = [5,6,8,10]
dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
elif vary=='progenitor':
pid = [26,27,28,29,30,31]
dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr]
elif vary=='dipole':
pid = [11,12,13]
#dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)]
dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2]
elif vary=='quad':
pid = [14,15,16,17,18]
dp = [0.5*u.Gyr**-2 for x in range(5)]
elif vary=='octu':
pid = [19,20,21,22,23,24,25]
dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)]
else:
pid = []
dp = []
return (pid, dp)
def get_parlabel(pid):
"""Return label for a list of parameter ids
Parameter:
pid - list of parameter ids"""
master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$', '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$', '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$', '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$', '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$', '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ]
master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc', 'km/s', 'kpc', 'rad', '', '', '', 'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ]
if type(pid) is list:
labels = []
units = []
for i in pid:
labels += [master[i]]
units += [master_units[i]]
else:
labels = master[pid]
units = master_units[pid]
return (labels, units)
def get_steps(Nstep=50, log=False):
"""Return deltax steps in both directions
Parameters:
Nstep - number of steps in one direction (default: 50)
log - if True, steps are logarithmically spaced (default: False)"""
if log:
step = np.logspace(-10, 1, Nstep)
else:
step = np.linspace(0.1, 10, Nstep)
step = np.concatenate([-step[::-1], step])
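# e.g. Nstep=2, log=False: step = [-10, -0.1, 0.1, 10], i.e. Nstep multipliers
# on each side of zero, ordered from most negative to most positive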
return (Nstep, step)
def lmc_position():
""""""
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
x = coord.SkyCoord(ra=ra, dec=dec, distance=d)
xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si]
print(xgal)
def lmc_properties():
""""""
# penarrubia 2016
mass = 2.5e11*u.Msun
ra = 80.8939*u.deg
dec = -69.7561*u.deg
dm = 18.48
d = 10**(1 + dm/5)*u.pc
c1 = coord.SkyCoord(ra=ra, dec=dec, distance=d)
cgal1 = c1.transform_to(coord.Galactocentric)
xgal = np.array([cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value])*u.kpc
return (mass, xgal)
# fit bspline to a stream model
def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit bspline to a stream model and save to file"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = np.argsort(stream.obs[0])
ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
np.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
fidsort = np.argsort(stream_fid.obs[0])
ra = np.linspace(np.min(stream_fid.obs[0])*1.05, np.max(stream_fid.obs[0])*0.95, Nobs)
tfid = np.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)]
llabel = 'b-spline fit'
else:
llabel = ''
plt.close()
fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim-1):
plt.sca(ax[0][i])
plt.plot(stream.obs[0], stream.obs[i+1], 'ko')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel)
if fiducial:
fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k)
plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1)
plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial')
plt.ylabel(ylabel[i+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[i][0], ylims[i][1])
plt.sca(ax[1][i])
if fiducial:
yref = fits_fid(stream.obs[0])
ycolor = 'b'
else:
yref = fits[i](stream.obs[0])
ycolor = 'r'
plt.axhline(0, color=ycolor, lw=2)
if fiducial: plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1)
plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko')
if fiducial:
fits_diff = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k)
plt.plot(stream.obs[0][isort], fits_diff(stream.obs[0][isort]), 'r--')
plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel)
plt.xlabel(ylabel[0])
plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].split(' ')[0]))
if fiducial:
plt.sca(ax[0][Ndim-2])
plt.legend(fontsize='small')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
"""Fit each tail individually"""
Ndim = 6
fits = [None]*(Ndim-1)
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
Nobs = 10
k = 3
isort = np.argsort(stream.obs[0])
ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
for j in range(Ndim-1):
fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
if len(save)>0:
np.savez('../data/{:s}'.format(save), fits=fits)
if graph:
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
if fiducial:
stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
plt.close()
fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
for i in range(Ndim):
plt.sca(ax[0][i])
Nhalf = int(0.5*np.size(stream.obs[i]))
plt.plot(stream.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:], 'o')
if fiducial:
plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5)
plt.ylabel(ylabel[i])
plt.sca(ax[1][i])
if fiducial:
plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o')
plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o')
if fiducial:
plt.sca(ax[0][Ndim-1])
plt.legend(fontsize='small')
plt.tight_layout()
if len(graphsave)>0:
plt.savefig('../plots/{:s}.png'.format(graphsave))
else:
return fig
def get_stream_limits(n, align=False):
"""Return lists with limiting values in different dimensions"""
if n==-1:
xlims = [260, 100]
ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]]
elif n==-2:
xlims = [250, 210]
ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]]
elif n==-3:
xlims = [27, 17]
ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]]
elif n==-4:
xlims = [35, 10]
ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]]
if align:
ylims[0] = [-5, 5]
xup = [110, 110, 80, 80]
xlims = [xup[np.abs(n)-1], 40]
return (xlims, ylims)
# step sizes for derivatives
def iterate_steps(n):
"""Calculate derivatives for different parameter classes, and plot"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
step_convergence(n, Nstep=10, vary=vary)
choose_step(n, Nstep=10, vary=vary)
def iterate_plotsteps(n):
"""Plot stream models for a variety of model parameters"""
for vary in ['bary', 'halo', 'progenitor']:
print(n, vary)
pid, dp, vlabel = get_varied_pars(vary)
for p in range(len(pid)):
plot_steps(n, p=p, Nstep=5, vary=vary, log=False)
def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun):
"""Plot stream for different values of a potential parameter"""
if align:
rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
else:
rotmatrix = None
pparams0 = pparams_fid
pid, dp, vlabel = get_varied_pars(vary)
plabel, punit = get_parlabel(pid[p])
Nstep, step = get_steps(Nstep=Nstep, log=log)
plt.close()
fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]})
# fiducial model
stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs)
Nobs = 10
k = 3
isort = np.argsort(stream0.obs[0])
ra = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, Nobs)
t = np.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)]
fits = [None]*5
for j in range(5):
fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
color = mpl.cm.RdBu(i/(2*Nstep-1))
#print(i, dp[p], pparams)
# fits
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
plt.sca(ax[0][j])
plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2)
plt.sca(ax[1][j])
plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[2][j])
plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2)
plt.sca(ax[3][j])
plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2)
# symmetric derivatives
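# stream_fits[i] holds the negative excursion (step[i] < 0) and stream_fits[-i-1]
# its mirrored positive counterpart, so the sign-flipped difference below is the
# central-difference derivative dy/dp ~ [y(p0 + |s|dp) - y(p0 - |s|dp)] / (2|s|dp)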
ra_der = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, 100)
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx = -dy / np.abs(2*step[i]*dp[p])
plt.sca(ax[4][j])
plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i)
# labels, limits
xlims, ylims = get_stream_limits(n, align)
ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
if align:
ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
for j in range(5):
plt.sca(ax[0][j])
plt.ylabel(ylabel[j+1])
plt.xlim(xlims[0], xlims[1])
plt.ylim(ylims[j][0], ylims[j][1])
plt.sca(ax[1][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
plt.sca(ax[2][j])
plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
plt.sca(ax[3][j])
plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].split(' ')[0], plabel))
plt.sca(ax[4][j])
plt.xlabel(ylabel[0])
plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].split(' ')[0], plabel))
#plt.suptitle('Varying {}'.format(plabel), fontsize='small')
plt.tight_layout()
plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep))
def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=np.nan, Nra=50):
"""Check deviations in numerical derivatives for consecutive step sizes"""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = np.eye(3)
xmm = mock['ra_range']
# fiducial model
pparams0 = pparams_fid
stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
if np.any(~np.isfinite(ra_der)):
ra_der = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra)
Nra = np.size(ra_der)
# parameters to vary
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
dpvec = np.array([x.value for x in dp])
Nstep, step = get_steps(Nstep=Nstep, log=log)
dydx_all = np.empty((Np, Nstep, 5, Nra))
dev_der = np.empty((Np, Nstep-2*layer))
step_der = np.empty((Np, Nstep-2*layer))
for p in range(Np):
plabel = get_parlabel(pid[p])
if verbose: print(p, plabel)
# excursions
stream_fits = [[None] * 5 for x in range(2 * Nstep)]
for i, s in enumerate(step[:]):
if verbose: print(i, s)
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# fits
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fits_ex = [None]*5
for j in range(5):
fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
stream_fits[i][j] = fits_ex[j]
# symmetric derivatives
dydx = np.empty((Nstep, 5, Nra))
for i in range(Nstep):
color = mpl.cm.Greys_r(i/Nstep)
for j in range(5):
dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
dydx[i][j] = -dy / np.abs(2*step[i]*dp[p])
dydx_all[p] = dydx
# deviations from adjacent steps
step_der[p] = -step[layer:Nstep-layer] * dp[p]
for i in range(layer, Nstep-layer):
dev_der[p][i-layer] = 0
for j in range(5):
for l in range(layer):
dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i-l-1][j])**2)
dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i+l+1][j])**2)
np.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_all, steps_all=np.outer(dpvec,step[Nstep:]))
if graph:
plt.close()
fig, ax = plt.subplots(1,Np,figsize=(4*Np,4))
for p in range(Np):
plt.sca(ax[p])
plt.plot(step_der[p], dev_der[p], 'ko')
#plabel = get_parlabel(pid[p])
#plt.xlabel('$\Delta$ {}'.format(plabel))
plt.ylabel('D')
plt.gca().set_yscale('log')
plt.tight_layout()
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
Np = len(pid)
plabels, units = get_parlabel(pid)
punits = ['({})'.format(x) if len(x) else '' for x in units]
t = np.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.npz'.format(name, vlabel, Nstep, log, layer))
dev = t['dev']
step = t['step']
dydx = t['ders']
steps_all = t['steps_all'][:,::-1]
Nra = np.shape(dydx)[-1]
best = np.empty(Np)
# plot setup
da = 4
nrow = 2
ncol = Np
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), squeeze=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]})
for p in range(Np):
# choose step
dmin = np.min(dev[p])
dtol = tolerance * dmin
opt_step = np.min(step[p][dev[p]<dtol])
opt_id = step[p]==opt_step
best[p] = opt_step
## largest step w deviation smaller than 1e-4
#opt_step = np.max(step[p][dev[p]<1e-4])
#opt_id = step[p]==opt_step
#best[p] = opt_step
plt.sca(ax[0][p])
for i in range(5):
for j in range(10):
plt.plot(steps_all[p], np.tanh(dydx[p,:,i,np.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5)
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.ylim(-1,1)
plt.ylabel('Derivative')
plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='small')
plt.sca(ax[1][p])
plt.plot(step[p], dev[p], 'ko')
plt.axvline(opt_step, ls='-', color='r', lw=2)
plt.plot(step[p][opt_id], dev[p][opt_id], 'ro')
plt.axhline(dtol, ls='-', color='orange', lw=1)
y0, y1 = plt.gca().get_ylim()
plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p]))
plt.ylabel('Derivative deviation')
np.save('../data/optimal_step_{}_{}'.format(name, vlabel), best)
plt.tight_layout(h_pad=0)
plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def read_optimal_step(name, vary, equal=False):
"""Return optimal steps for a range of parameter types"""
if type(vary) is not list:
vary = [vary]
dp = np.empty(0)
for v in vary:
dp_opt = np.load('../data/optimal_step_{}_{}.npy'.format(name, v))
dp = np.concatenate([dp, dp_opt])
if equal:
dp = np.array([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01])
return dp
def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = mock['xi_range']
else:
rotmatrix = np.eye(3)
xmm = mock['ra_range']
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
iexsort = np.argsort(fiducial.obs[0])
raex = np.linspace(np.percentile(fiducial.obs[0], 10), np.percentile(fiducial.obs[0], 90), Nobs)
tex = np.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)]
fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k)
nrow = 2
ncol = np.int64((Np+1)/nrow)
da = 4
c = ['b', 'b', 'b', 'r', 'r', 'r']
plt.close()
fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), squeeze=False)
for p in range(Np):
plt.sca(ax[p%2][int(p/2)])
for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k)
plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i])
plt.xlabel('R.A. (deg)')
plt.ylabel('Dec (deg)')
#print(get_parlabel(p))
plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium')
plt.tight_layout()
plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200)
# observing modes
def define_obsmodes():
"""Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes"""
obsmodes = {}
obsmodes['fiducial'] = {'sig_obs': np.array([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['binospec'] = {'sig_obs': np.array([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['hectochelle'] = {'sig_obs': np.array([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]}
obsmodes['desi'] = {'sig_obs': np.array([0.1, 2, 10, np.nan, np.nan]), 'Ndim': [4,]}
obsmodes['gaia'] = {'sig_obs': np.array([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]}
obsmodes['exgal'] = {'sig_obs': np.array([0.5, np.nan, 20, np.nan, np.nan]), 'Ndim': [3,]}
pickle.dump(obsmodes, open('../data/observing_modes.info','wb'))
def obsmode_name(mode):
"""Return full name of the observing mode"""
if type(mode) is not list:
mode = [mode]
full_names = {'fiducial': 'Fiducial',
'binospec': 'Binospec',
'hectochelle': 'Hectochelle',
'desi': 'DESI-like',
'gaia': 'Gaia-like',
'exgal': 'Extragalactic'}
keys = full_names.keys()
names = []
for m in mode:
if m in keys:
name = full_names[m]
else:
name = m
names += [name]
return names
# crbs using bspline
def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=np.nan, dd=0.5, Nmin=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3):
""""""
mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
if align:
rotmatrix = mock['rotmatrix']
xmm = np.sort(mock['xi_range'])
else:
rotmatrix = np.eye(3)
xmm = np.sort(mock['ra_range'])
# typical uncertainties and data availability
obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
if errmode not in obsmodes.keys():
errmode = 'fiducial'
sig_obs = obsmodes[errmode]['sig_obs']
data_dim = obsmodes[errmode]['Ndim']
# mock observations
if np.any(~np.isfinite(ra)):
if (np.int64((xmm[1]-xmm[0])/dd + 1) < Nmin):
dd = (xmm[1]-xmm[0])/Nmin
ra = np.arange(xmm[0], xmm[1]+dd, dd)
#ra = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs)
#else:
Nobs = np.size(ra)
print(name, Nobs)
err = np.tile(sig_obs, Nobs).reshape(Nobs,-1)
# varied parameters
pparams0 = pparams_fid
pid, dp_fid, vlabel = get_varied_pars(vary)
Np = len(pid)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)]
if scale:
dp_unit = unity_scale(dp)
dps = [x*y for x,y in zip(dp, dp_unit)]
# calculate derivatives for all parameters
for p in range(Np):
for i, s in enumerate([-1, 1]):
pparams = [x for x in pparams0]
pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
# bspline fits to stream centerline
iexsort = np.argsort(stream.obs[0])
raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
for j in range(5):
fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
# populate matrix of derivatives and calculate CRB
for Ndim in data_dim:
#for Ndim in [6,]:
Ndata = Nobs * (Ndim - 1)
cyd = np.empty(Ndata)
dydx = np.empty((Np, Ndata))
dy2 = np.empty((2, Np, Ndata))
for j in range(1, Ndim):
for p in range(Np):
dy = fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra)
dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra)
dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra)
#positive = np.abs(dy)>0
#if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dy[positive])), np.max(np.abs(dy)), np.median(np.abs(dy))))
if scale:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dps[p].value)
else:
dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dp[p].value)
#if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dydx[p][(j-1)*Nobs:j*Nobs][positive])), np.max(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])), np.median(np.abs(dydx[p][(j-1)*Nobs:j*Nobs]))))
#print(j, p, get_parlabel(pid[p])[0], dp[p], np.min(np.abs(dy)), np.max(np.abs(dy)), np.median(dydx[p][(j-1)*Nobs:j*Nobs]))
cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2
np.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt)
# data component of the Fisher matrix
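# Fisher matrix of the data: dxi[p, q] = sum_k dydx[p, k] * dydx[q, k] / cyd[k],
# i.e. F = dydx @ diag(1/cyd) @ dydx.T for Gaussian uncertainties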
cy = np.diag(cyd)
cyi = np.diag(1. / cyd)
caux = np.matmul(cyi, dydx.T)
dxi = np.matmul(dydx, caux)
# component based on prior knowledge of model parameters
pxi = priors(name, vary)
# full Fisher matrix
cxi = dxi + pxi
if verbose:
cx = np.linalg.inv(cxi)
cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
sx = np.sqrt(np.diag(cx))
print('CRB', sx)
print('condition {:g}'.format(np.linalg.cond(cxi)))
print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
cx = stable_inverse(cxi)
print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
np.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi)
def priors(name, vary):
"""Return covariance matrix with prior knowledge about parameters"""
mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
cprog = mock['prog_prior']
cbary = np.array([0.1*x.value for x in pparams_fid[:5]])**-2
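# bary: Gaussian priors with sigma = 10% of the fiducial values, stored as
# inverse variances; the zero entries below correspond to flat (uninformative) priors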
chalo = np.zeros(4)
cdipole = np.zeros(3)
cquad = np.zeros(5)
coctu = np.zeros(7)
priors = {'progenitor': cprog, 'bary': cbary, 'halo': chalo, 'dipole': cdipole, 'quad': cquad, 'octu': coctu}
cprior = np.empty(0)
for v in vary:
cprior = np.concatenate([cprior, priors[v]])
pxi = np.diag(cprior)
return pxi
def scale2invert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
#dp = read_optimal_step(name, vary)
d = np.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
dydx = d['dydx']
cyd = d['cyd']
y = d['y']
dp = d['dp']
dy = (y[1,:,:] - y[0,:,:])
dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,np.newaxis])
scaling_par = np.median(np.abs(dydx), axis=1)
dydx = dydx / scaling_par[:,np.newaxis]
dydx_ = np.reshape(dydx, (len(dp), Ndim-1, -1))
scaling_dim = np.median(np.abs(dydx_), axis=(2,0))
dydx_ = dydx_ / scaling_dim[np.newaxis,:,np.newaxis]
cyd_ = np.reshape(cyd, (Ndim-1, -1))
cyd_ = cyd_ / scaling_dim[:,np.newaxis]
cyd = np.reshape(cyd_, (-1))
dydx = np.reshape(dydx_, (len(dp), -1))
mmin = np.min(np.abs(dy), axis=0)
mmax = np.max(np.abs(dy), axis=0)
mmed = np.median(np.abs(dydx), axis=1)
dyn_range = mmax/mmin
#print(dyn_range)
print(np.min(dyn_range), np.max(dyn_range), np.std(dyn_range))
cy = np.diag(cyd)
cyi = np.diag(1. / cyd)
caux = np.matmul(cyi, dydx.T)
cxi = np.matmul(dydx, caux)
print('condition {:e}'.format(np.linalg.cond(cxi)))
cx = np.linalg.inv(cxi)
cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
cx = stable_inverse(cxi, maxiter=30)
print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
def unity_scale(dp):
""""""
dim_scale = 10**np.array([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4])
dim_scale = 10**np.array([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3])
#dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3])
#dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3])
dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))]
return dp_unit
def test_inversion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'):
""""""
pid, dp, vlabel = get_varied_pars(vary)
d = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = d['cxi']
N = np.shape(cxi)[0]
cx_ = np.linalg.inv(cxi)
cx = stable_inverse(cxi, verbose=True, maxiter=100)
#cx_ii = stable_inverse(cx, verbose=True, maxiter=50)
print('condition {:g}'.format(np.linalg.cond(cxi)))
print('linalg inverse', np.allclose(np.matmul(cx_,cxi), np.eye(N)))
print('stable inverse', np.allclose(np.matmul(cx,cxi), np.eye(N)))
#print(np.matmul(cx,cxi))
#print('inverse inverse', np.allclose(cx_ii, cxi))
def stable_inverse(a, maxiter=20, verbose=False):
"""Invert a matrix with a bad condition number"""
N = np.shape(a)[0]
# guess
q = np.linalg.inv(a)
qa = np.matmul(q,a)
# iterate
for i in range(maxiter):
if verbose: print(i, np.sqrt(np.sum((qa - np.eye(N))**2)), np.allclose(qa, np.eye(N)))
if np.allclose(qa, np.eye(N)):
return q
qai = np.linalg.inv(qa)
q = np.matmul(qai,q)
qa = np.matmul(q,a)
return q
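# A minimal, self-contained sanity check (hypothetical helper, not part of the
# original pipeline), assuming la is scipy.linalg as used elsewhere here:
# compare identity residuals of the plain and the refined inverse on an
# ill-conditioned Hilbert matrix.
def _check_stable_inverse(N=8):
    """Print the identity residual |qa - I| for np.linalg.inv and stable_inverse"""
    h = la.hilbert(N)
    for q in (np.linalg.inv(h), stable_inverse(h, maxiter=50)):
        print(np.sqrt(np.sum((np.matmul(q, h) - np.eye(N))**2)))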
def crb_triangle(n, vary, Ndim=6, align=True, plot='all', fast=False, name='gd1', errmode='fiducial'):
"""Show CRB correlations between a chosen set of parameters in a triangle plot"""
pid, dp, vlabel = get_varied_pars(vary)
plabels, units = get_parlabel(pid)
params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
if align:
alabel = '_align'
else:
alabel = ''
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
#print(cx[0][0])
if plot=='halo':
cx = cx[:4, :4]
params = params[:4]
elif plot=='bary':
cx = cx[4:9, 4:9]
params = params[4:9]
elif plot=='progenitor':
cx = cx[9:, 9:]
params = params[9:]
Nvar = len(params)
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
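# 2x2 marginal covariance of parameters (i, j): its eigenvalues are the squared
# 1-sigma semi-axes of the error ellipse, its eigenvectors the orientation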
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2)
plt.gca().add_patch(e)
plt.gca().autoscale_view()
#plt.xlim(-ylim[i],ylim[i])
#plt.ylim(-ylim[j], ylim[j])
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.tight_layout()
plt.savefig('../plots/crb_triangle_{:s}_{:d}_{:s}_{:d}_{:s}.pdf'.format(alabel, n, vlabel, Ndim, plot))
def crb_triangle_alldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='all', fast=False, scale=False, errmode='fiducial'):
"""Show correlations in CRB between a chosen set of parameters in a triangle plot"""
pid, dp_fid, vlabel = get_varied_pars(vary)
dp_opt = read_optimal_step(name, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
plabels, units = get_parlabel(pid)
punits = [' ({})'.format(x) if len(x) else '' for x in units]
params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
if plot=='halo':
i0 = 11
i1 = 15
elif plot=='bary':
i0 = 6
i1 = 11
elif plot=='progenitor':
i0 = 0
i1 = 6
elif plot=='dipole':
i0 = 15
i1 = len(params)
else:
i0 = 0
i1 = len(params)
Nvar = i1 - i0
params = params[i0:i1]
if scale:
dp_unit = unity_scale(dp)
#print(dp_unit)
dp_unit = dp_unit[i0:i1]
pid = pid[i0:i1]
label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
plt.close()
dax = 2
fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
for l, Ndim in enumerate([3, 4, 6]):
fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
cxi = fm['cxi']
#cxi = np.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npy'.format(errmode, Ndim, name, align, vlabel))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
cx = cx[i0:i1,i0:i1]
for i in range(0,Nvar-1):
for j in range(i+1,Nvar):
plt.sca(ax[j-1][i])
if scale:
cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
else:
cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
w, v = np.linalg.eig(cx_2d)
if np.all(np.isreal(v)):
theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
width = np.sqrt(w[0])*2
height = np.sqrt(w[1])*2
e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
plt.gca().add_patch(e)
if l==1:
plt.gca().autoscale_view()
if j==Nvar-1:
plt.xlabel(params[i])
if i==0:
plt.ylabel(params[j])
# turn off unused axes
for i in range(0,Nvar-1):
for j in range(i+1,Nvar-1):
plt.sca(ax[i][j])
plt.axis('off')
plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
plt.legend(loc=2, bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot))
def compare_optimal_steps():
""""""
vary = ['progenitor', 'bary', 'halo', 'dipole', 'quad']
vary = ['progenitor', 'bary', 'halo']
for name in ['gd1', 'tri']:
print(name)
print(read_optimal_step(name, vary))
def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True):
""""""
if first:
store_progparams(name)
wrap_angles(name, save=True)
progenitor_prior(name)
find_greatcircle(name=name)
endpoints(name)
for v in vary:
step_convergence(name=name, Nstep=Nstep, vary=v)
choose_step(name=name, Nstep=Nstep, vary=v)
calculate_crb(name=name, vary=vary, verbose=True)
crb_triangle_alldim(name=name, vary=vary)
########################
# cartesian coordinates
# accelerations
def acc_kepler(x, p=1*u.Msun):
"""Keplerian acceleration"""
r = np.linalg.norm(x)*u.kpc
a = -G * p * 1e11 * r**-3 * x
return a.to(u.pc*u.Myr**-2)
def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]):
""""""
r = np.linalg.norm(x)*u.kpc
a = -(G*p[0]*x/(r * (r + p[1])**2)).to(u.pc*u.Myr**-2)
return a
def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]):
""""""
R = np.linalg.norm(x[:2])*u.kpc
z = x[2]
a = -(G*p[0]*x * (R**2 + (p[1] + np.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2)
a[2] *= (1 + p[2]/np.sqrt(z**2 + p[2]**2))
return a
def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]):
""""""
r = np.linalg.norm(x)*u.kpc
q = np.array([1*u.Unit(1), p[2], p[3]])
a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - np.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2)
return a
def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]):
"""Acceleration due to outside dipole perturbation"""
pv = [x.value for x in p]
a = np.sqrt(3/(4*np.pi)) * np.array([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2
return a
def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Acceleration due to outside quadrupole perturbation"""
a = np.zeros(3)*u.pc*u.Myr**-2
f = 0.5*np.sqrt(15/np.pi)
a[0] = x[0]*(f*p[4] - f/np.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3]
a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/np.sqrt(3)*p[2]) + x[2]*f*p[1]
a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/np.sqrt(3)*p[2]
return a.to(u.pc*u.Myr**-2)
def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Acceleration due to outside octupole perturbation"""
a = np.zeros(3)*u.pc*u.Myr**-2
f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
xu = x.unit
pu = p[0].unit
pvec = np.array([i.value for i in p]) * pu
dmat = np.ones((3,7)) * f * pvec * xu**2
x = np.array([i.value for i in x])
dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
a = np.einsum('ij->i', dmat) * dmat.unit
return a.to(u.pc*u.Myr**-2)
# derivatives
def der_kepler(x, p=1*u.Msun):
"""Derivative of Kepler potential parameters wrt cartesian components of the acceleration"""
r = np.linalg.norm(x)*u.kpc
dmat = np.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun
dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11
return dmat.value
def pder_kepler(x, p=1*u.Msun):
"""Derivative of cartesian components of the acceleration wrt to Kepler potential parameter"""
r = np.linalg.norm(x)*u.kpc
dmat = np.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1
dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11
return dmat.value
def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]):
"""Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters"""
p = pu
q = np.array([1, p[2], p[3]])
# physical quantities
r = np.linalg.norm(x)*u.kpc
a = acc_nfw(x, p=pu)
# derivatives
dmat = np.zeros((3, 4))
# Vh
dmat[:,0] = 2*a/p[0]
# Rh
dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2
# qy, qz
for i in [1,2]:
dmat[i,i+1] = (-2*a[i]/q[i]).value
return dmat
def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Hernquist bulge potential parameters"""
# coordinates
r = np.linalg.norm(x)*u.kpc
# accelerations
ab = acc_bulge(x, p=pu[:2])
# derivatives
dmat = np.zeros((3, 2))
# Mb
dmat[:,0] = ab/pu[0]
# ab
dmat[:,1] = 2 * ab / (r + pu[1])
return dmat
def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]):
"""Calculate derivarives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters"""
# coordinates
R = np.linalg.norm(x[:2])*u.kpc
z = x[2]
aux = np.sqrt(z**2 + pu[2]**2)
# accelerations
ad = acc_disk(x, p=pu)
# derivatives
dmat = np.zeros((3, 3))
# Md
dmat[:,0] = ad / pu[0]
# ad
dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2)
# bd
dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux
dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value
return dmat
def der_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of dipole potential parameters wrt (Cartesian) components of the acceleration vector a"""
# shape: 3, Npar
dmat = np.zeros((3,3))
f = np.sqrt((4*np.pi)/3)
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def pder_dipole(x, pu=[pparams_fid[j] for j in range(11,14)]):
"""Calculate derivatives of (Cartesian) components of the acceleration vector a wrt dipole potential parameters"""
# shape: 3, Npar
dmat = np.zeros((3,3))
f = np.sqrt(3/(4*np.pi))
dmat[0,2] = f
dmat[1,0] = f
dmat[2,1] = f
return dmat
def der_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of quadrupole potential parameters wrt (Cartesian) components of the acceleration vector a"""
f = 2/np.sqrt(15/np.pi)
s = np.sqrt(3)
x = [1e-3/i.value for i in x]
dmat = np.ones((3,5)) * f
dmat[0] = np.array([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] = np.array([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] = np.array([0, x[1], 0.5*s*x[2], x[0], 0])
return dmat
def pder_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt quadrupole potential parameters"""
f = 0.5*np.sqrt(15/np.pi)
s = 1/np.sqrt(3)
x = [1e-3*i.value for i in x]
dmat = np.ones((3,5)) * f
dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]])
dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]])
dmat[2] *= np.array([0, x[1], 2*s*x[2], x[0], 0])
return dmat
def pder_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
"""Caculate derivatives of (Cartesian) components of the acceleration vector a wrt octupole potential parameters"""
f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
x = [1e-3*i.value for i in x]
dmat = np.ones((3,7)) * f
dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
return dmat
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
"""Calculate CRB inverse matrix for 3D acceleration at position x in a halo potential"""
pid, dp, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# subset halo parameters
Nhalo = 4
cq = cx[:Nhalo,:Nhalo]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
xi = np.array([-8.3, 0.1, 0.1])*u.kpc
x0, v0 = gd1_coordinates()
#xi = np.array(x0)*u.kpc
d = 50
Nb = 20
x = np.linspace(x0[0]-d, x0[0]+d, Nb)
y = np.linspace(x0[1]-d, x0[1]+d, Nb)
x = np.linspace(-d, d, Nb)
y = np.linspace(-d, d, Nb)
xv, yv = np.meshgrid(x, y)
xf = np.ravel(xv)
yf = np.ravel(yv)
af = np.empty((Nb**2, 3))
plt.close()
fig, ax = plt.subplots(3,3,figsize=(11,10))
dimension = ['x', 'y', 'z']
xlabel = ['y', 'x', 'x']
ylabel = ['z', 'z', 'y']
for j in range(3):
if j==0:
xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T
elif j==1:
xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T
elif j==2:
xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T
for i in range(Nb**2):
#xi = np.array([xf[i], yf[i], x0[2]])*u.kpc
xi = xin[i]*u.kpc
a = acc_nfw(xi)
dqda = halo_accelerations(xi)
cai = np.matmul(dqda, np.matmul(cqi, dqda.T))
if fast:
ca = np.linalg.inv(cai)
else:
ca = stable_inverse(cai)
a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
af[i] = np.abs(a_crb/a)
af[i] = a_crb
for i in range(3):
plt.sca(ax[j][i])
im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1)
plt.xlabel(xlabel[j]+' (kpc)')
plt.ylabel(ylabel[j]+' (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("top", size="4%", pad=0.05)
plt.colorbar(im, cax=cax, orientation='horizontal')
plt.gca().xaxis.set_ticks_position('top')
cax.tick_params(axis='x', labelsize='xx-small')
if j==0:
plt.title('a$_{}$'.format(dimension[i]), y=4)
plt.tight_layout(rect=[0,0,1,0.95])
plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))
def acc_cart(x, components=['bary', 'halo', 'dipole']):
""""""
acart = np.zeros(3) * u.pc*u.Myr**-2
dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]}
accelerations = []
for c in components:
accelerations += dict_acc[c]
for acc in accelerations:
a_ = acc(x)
acart += a_
return acart
def acc_rad(x, components=['bary', 'halo', 'dipole']):
"""Return radial acceleration"""
r = np.linalg.norm(x) * x.unit
theta = np.arccos(x[2].value/r.value)
phi = np.arctan2(x[1].value, x[0].value)
trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
a_cart = acc_cart(x, components=components)
a_rad = np.dot(a_cart, trans)
return a_rad
def ader_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = np.empty((3,0))
dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = np.hstack((dacart, da_))
return dacart
def apder_cart(x, components=['bary', 'halo', 'dipole']):
""""""
dacart = np.empty((3,0))
dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]}
derivatives = []
for c in components:
derivatives += dict_der[c]
for ader in derivatives:
da_ = ader(x)
dacart = np.hstack((dacart, da_))
return dacart
def apder_rad(x, components=['bary', 'halo', 'dipole']):
"""Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x"""
r = np.linalg.norm(x) * x.unit
theta = np.arccos(x[2].value/r.value)
phi = np.arctan2(x[1].value, x[0].value)
trans = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
dadq_cart = apder_cart(x, components=components)
dadq_rad = np.einsum('ij,i->j', dadq_cart, trans)
return dadq_rad
def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = 3e-1
vmax = 1e1
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
x0, v0 = gd1_coordinates()
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Npix = np.size(xv)
af = np.empty((Npix, 3))
derf = np.empty((Npix, 3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i] = dadq
ca = np.matmul(dadq, np.matmul(cq, dadq.T))
a_crb = np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2
if relative:
af[i] = np.abs(a_crb/a)
else:
af[i] = a_crb
#print(xi, a_crb)
# save
np.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$']
for i in range(3):
plt.sca(ax[i])
im = plt.imshow(af[:,i].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=mpl.colors.LogNorm())
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i] + rlabel)
plt.tight_layout()
plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative))
def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', j=0, align=True, d=20, Nb=30, fast=False, scale=False, relative=True, progenitor=False, batch=False, errmode='fiducial'):
""""""
pid, dp_fid, vlabel = get_varied_pars(vary)
if align:
alabel = '_align'
else:
alabel = ''
if relative:
vmin = 1e-2
vmax = 1
rlabel = ' / a'
else:
vmin = -0.005
vmax = 0.005
#vmin = 1e-2
#vmax = 1e0
rlabel = ' (pc Myr$^{-2}$)'
# read in full inverse CRB for stream modeling
cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
if fast:
cx = np.linalg.inv(cxi)
else:
cx = stable_inverse(cxi)
# choose the appropriate components:
Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
if 'progenitor' not in vary:
Nprog = 0
nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
if 'progenitor' not in vary:
nstart['dipole'] = Npoint
nend['dipole'] = Npoint + Ndipole
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
Npot = np.shape(cq)[0]
if fast:
cqi = np.linalg.inv(cq)
else:
cqi = stable_inverse(cq)
if scale:
dp_opt = read_optimal_step(n, vary)
dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
scale_mat = np.outer(scale_vec, scale_vec)
cqi *= scale_mat
if progenitor:
prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()}
x0, v0 = prog_coords[n]
print(x0)
else:
x0 = np.array([4, 4, 0])
Rp = np.linalg.norm(x0[:2])
zp = x0[2]
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Npix = np.size(xv)
af = np.empty((Npix, 3))
derf = np.empty((Npix*3, Npot))
for i in range(Npix):
xi = xin[i]*u.kpc
a = acc_cart(xi, components=components)
dadq = apder_cart(xi, components=components)
derf[i*3:(i+1)*3] = dadq
ca = np.matmul(derf, np.matmul(cq, derf.T))
Nx = Npot
Nw = Npix*3
vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
## check orthogonality:
#for i in range(Npot-1):
#for k in range(i+1, Npot):
#print(i, k)
#print(np.dot(vecs[:,i], vecs[:,k]))
#print(np.dot(vecs[::3,i], vecs[::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]))
# save
np.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca)
plt.close()
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
if j==0:
vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
vmin = 1e-2
vmax = 5e0
norm = mpl.colors.LogNorm()
else:
vcomb = vecs[:,j]
label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
vmin = -0.025
vmax = 0.025
norm = None
for i in range(3):
plt.sca(ax[i])
#im = plt.imshow(vecs[i::3,j].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax)
im = plt.imshow(vcomb[i::3].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=norm)
if progenitor:
plt.plot(Rp, zp, 'r*', ms=10)
plt.xlabel('R (kpc)')
plt.ylabel('Z (kpc)')
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", size="3%", pad=0.1)
plt.colorbar(im, cax=cax)
plt.ylabel(label[i])
plt.tight_layout()
if batch:
return fig
else:
plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor))
def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', d=20, Nb=10):
"""Plot acceleration field in R,z plane"""
if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
components = [component]
else:
components = [x for x in vary if x!='progenitor']
x0 = np.array([4, 4, 0])
R = np.linspace(-d, d, Nb)
k = x0[1]/x0[0]
x = R/np.sqrt(1+k**2)
y = k * x
z = np.linspace(-d, d, Nb)
xv, zv = np.meshgrid(x, z)
yv, zv = np.meshgrid(y, z)
xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
# 8/13/18
# chenyong
# call plant height
"""
call plant height from predicted images
"""
import os
import sys
import cv2
import numpy as np
import pandas as pd
import os.path as op
import scipy.misc as sm
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import rcParams
from PIL import Image
from math import hypot
from schnablelab.apps.natsort import natsorted
from schnablelab.apps.headers import Slurm_header
from sklearn.linear_model import LinearRegression
from schnablelab.apps.base import ActionDispatcher, OptionParser, glob
import datetime
from dateutil import parser
from pathlib import Path
def main():
actions = (
('Polish', 'Polish the predicted images (hyper)'),
('PolishBatch', 'generate all slurm jobs of polish (hyper)'),
('CallHeight', 'call height from polished image (hyper)'),
('CallHeightBatch', 'generate all slurm jobs of plant height calling (hyper)'),
('CallHeightRGB', 'call height from RGB image'),
('CallHeightRGBBatch', 'generate all slurm jobs of plant height calling (RGB)'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
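# A hypothetical invocation of the dispatcher (module name assumed):
# python CallHeight.py CallHeightRGB image_dir --threshold 1.12 --zoom_date 2018-06-01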
def CallHeightRGB(args):
"""
%prog image_in_dir
using thresholding method to calculate the plant height
"""
p = OptionParser(CallHeightRGB.__doc__)
p.add_option("--threshold", default = '1.12',
help='speficy the threshold cutoff')
p.add_option("--zoom_date",
help='specify which date zoome level changed, yyyy-mm-dd')
p.add_option("--summarize_fn", default= 'Heights.csv',
help='specify the file recording height for each sample')
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
imgInDir, = args
inPath = Path(imgInDir)
imgs = list(inPath.glob('*png'))
print('Total %s images'%len(imgs))
df = pd.DataFrame(dict(zip(['fullPath'],[imgs])))
df['fn'] = df['fullPath'].apply(lambda x: x.name)
df['sm'] = df['fn'].apply(lambda x: x.split('_')[1])
df['dt'] = df['fn'].apply(lambda x: x.split('_')[2].split('.')[0])
df['dt'] = pd.to_datetime(df['dt'])
#df['sv'] = df['fn'].apply(lambda x: x.split('_')[-1].split('.')[0])
#df_sort = df.sort_values(['sm','dt','sv']).reset_index(drop=True)
df_sort = df.sort_values(['sm','dt']).reset_index(drop=True)
#print(df_sort)
threshold = float(opts.threshold)
print('threshold by green index value %s'%threshold)
zoom_date = parser.parse(opts.zoom_date)
print('zoom change date: %s'%zoom_date)
zoom_border_dict = {'zoom1': (60,1750,500,2250), 'zoom2': (180,1700,850,1770)}
zoom_ratio_dict = {'zoom1': 149/1925, 'zoom2': 149/965}
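# zoom_border_dict holds the (top, bottom, left, right) crop of the chamber
# interior; zoom_ratio_dict converts pixels to cm (149/1925 cm per pixel at zoom1)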
f0 = open(opts.summarize_fn, 'w')
f0.write('file_name\tzoom_level\theight(pixel)\theight(cm)\n')
for idx, row in df_sort.iterrows():
print(row['fn'])
# read image and convert bgr to rgb
img = cv2.imread(str(row['fullPath']))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print('read image and convert bgr to rgb')
# convert 3 2D to 1 2D with green index
img_float = img.astype(np.float64) # np.float is deprecated/removed in NumPy; use an explicit dtype
img_green = (2*img_float[:,:,1])/(img_float[:,:,0]+img_float[:,:,2]) # 2*green/(red+blue)
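# e.g. a leafy pixel (R,G,B)=(60,120,40) gives 2*120/(60+40) = 2.4 (> 1.12),
# while gray background (100,100,100) gives 200/200 = 1.0 (< 1.12)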
thresh1 = np.where(img_green>threshold, img_green, 0)
print('convert 3 2D to 1 2D with green index')
# remove the chamber border
mytime = row['dt']
zoom_level = 'zoom1' if mytime < zoom_date else 'zoom2'
upper,bottom,left,right = zoom_border_dict[zoom_level]
thresh1[0:upper]=0
thresh1[bottom:]=0
thresh1[:,0:left]=0
thresh1[:,right:]=0
print('remove the chamber border')
# rescale to 255; an all-zero mask (no pixels above the green-index threshold)
# means no height can be called for this image
if thresh1.max() == 0:
f0.write('%s\t%s\tNaN\tNaN\n'%(row['fn'], zoom_level))
continue
thresh1 = (thresh1/float(thresh1.max()))*255
# blur the image
blur = cv2.GaussianBlur(thresh1, (7,7), 0)
# 2nd threshold
blur_int = blur.astype(np.uint8)
ret, thresh2 = cv2.threshold(blur_int, 1, 255, cv2.THRESH_BINARY)
# call contours
'''cv2.findContours() takes three arguments: the source image, the contour
retrieval mode, and the contour approximation method. It returns the contours
and their hierarchy. contours is a Python list of all the contours in the image;
each contour is a Numpy array of the (x,y) boundary points of an object.'''
__,contours,__ = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0,255,0), 3)
# call height
min_y, max_y = [],[]
for i in contours:
min_y.append(np.min(i[:,:,1]))
max_y.append(np.max(i[:,:,1]))
if min_y and max_y:
y_lowest, y_highest = min(min_y), max(max_y)
height_pixels = y_highest-y_lowest
height_cm = height_pixels*zoom_ratio_dict[zoom_level]
f0.write('%s\t%s\t%s\t%s\n'%(row['fn'], zoom_level, height_pixels, height_cm))
# draw height and save results
cv2.line(img, (500, y_lowest), (2000, y_lowest), (255,0,0), 7)
new_fn = row['fn'].replace('.png', '.height.png')
new_fn_path = inPath/new_fn
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # cv2 assume your color is bgr
cv2.imwrite(str(new_fn_path), img)
#print('%s saved.'%new_fn)
else:
f0.write('%s\t%s\tNaN\tNaN\n'%(row['fn'], zoom_level))
f0.close()
print('Done! check %s'%opts.summarize_fn)
def CallPart(rgb_arr, part='stem'):
crp_shape2d = rgb_arr.shape[0:2]
if part =='stem':
r, g, b = 251, 129, 14
elif part == 'panicle':
r, g, b = 126, 94, 169
elif part == 'leaf':
r, g, b = 0, 147, 0
else:
sys.exit('only support stem, panicle, and leaf')
p1 = np.full(crp_shape2d,r)
p2 = np.full(crp_shape2d,g)
p3 = np.full(crp_shape2d,b)
p123 = np.stack([p1, p2, p3], axis=2)
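# The recovered snippet ends here. A minimal sketch of how the color key is
# typically used (an assumption, not the original continuation):
# part_mask = np.all(rgb_arr == p123, axis=2) # True where a pixel matches the part color
# ys = np.nonzero(part_mask)[0]
# part_height_px = ys.max() - ys.min() if ys.size else 0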
import tensorflow as tf
import numpy as np
import h5py
from time import time
from datetime import datetime
from random import shuffle
from functools import reduce
import os
max_seg = 20
max_word = 40
level_class_cnt = 3
test_percentage = 0.2
dropout_rate = tf.Variable(0.5)
hidden_feature_dim = 100
gru_feature_dim = 50
kernel_heights = [3, 4, 5]
batch_size = 256
epochs = 8
main_path = '/home/tim/Documents/NLP/gourmet/test'
w2v_weights_path = main_path + '/weights.npy'
tensorboard_log_dir_train = "/tmp/pycharm_nlp/logs/remake2/g_sen_large_max_" + datetime.now().strftime("%Y%m%d-%H%M%S") + "train"
tensorboard_log_dir_test = "/tmp/pycharm_nlp/logs/remake2/g_sen_large_max_" + datetime.now().strftime("%Y%m%d-%H%M%S") + "test"
input_path = main_path + '/gourmet.hdf5'
model_out_path = main_path + '/model.h5'
sample_amount = 0
mini_batch_cnt = 0
with h5py.File(input_path, 'r') as in_file:
for index in range(len(in_file['label/'].keys())):
mini_batch_cnt += 1
sample_amount += len(in_file['label/' + str(index)])
batch_indices = [*range(mini_batch_cnt)]
shuffle(batch_indices)
train_batches = batch_indices[0:int(mini_batch_cnt * (1 - test_percentage))]
test_batches = batch_indices[int(mini_batch_cnt * (1 - test_percentage)):]
w2v = np.load(w2v_weights_path, allow_pickle=True)
w2v_len = w2v.shape[1]
def map_to_w2v(x):
""" Get the w2v embedding for a specific w2v-id """
return w2v[int(x)]
def __label_map(raw_label):
""" Map the 5 label classes to the 3 classes """
if raw_label < 3:
return 0
elif raw_label == 3:
return 1
else:
return 2
def __balance_data(feature_array, label_array):
""" Balance batches """
to_balance_indices = np.where(label_array == 2)[0]
feature_array = np.delete(feature_array, to_balance_indices, axis=0)
label_array = np.delete(label_array, to_balance_indices, axis=0)
to_balance_indices = np.where(label_array == 4)[0]
feature_array = np.delete(feature_array, to_balance_indices, axis=0)
label_array = np.delete(label_array, to_balance_indices, axis=0)
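# assumed return of the balanced arrays (the source is truncated here)
return feature_array, label_array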
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds.MurckoScaffold import GetScaffoldForMol
from rdkit.ML.Cluster import Butina
import pandas as pd
import random
RANDOM_SEED = 42
def smiles_to_ecfp(smiles: list, radius: int = 2, nbits: int = 1024):
"""Calculate the morgan fingerprint of a list of smiles. Return a numpy array"""
fps = []
for smi in smiles:
mol = Chem.MolFromSmiles(smi)
fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nbits)
arr = np.zeros((1,))
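# assumed completion of the truncated loop, using the standard RDKit idiom:
DataStructs.ConvertToNumpyArray(fp, arr)
fps.append(arr)
return np.array(fps)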
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
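# For reflection h and transformation (R, t), the symmetry-equivalent reflection
# is R^T h with phase factor exp(-2*pi*i*h.t); the per-component factors stored
# here are combined by the product in symmetryEquivalentMillerIndices.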
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
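# Illustrative usage (not part of the generated data):
# sg = space_groups['P 21 21 21']
# hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))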
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
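# Names carrying a ':2' suffix, such as 'P n n n :2' above, denote the
# second origin choice (origin at a centre of inversion) from the
# International Tables for Crystallography; each group is registered under
# both its number and its full Hermann-Mauguin symbol.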
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
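# The translations in this table are not reduced modulo the lattice:
# numerators may be negative (e.g. [0,-1,-1] in 'P n n n :2') because the
# inversion partner of an operator (rot, t) is stored literally as
# (-rot, -t).  A minimal sketch of folding them into [0, 1), assuming `N`
# as above; `_wrap_translation` is a hypothetical helper for illustration
# only.
def _wrap_translation(trans_num, trans_den):
    # num mod den keeps the exact integer representation while folding
    # each fraction num/den into the half-open interval [0, 1).
    return N.remainder(trans_num, trans_den), trans_den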
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
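# 'C m c m' and the other C-centred groups below repeat their first eight
# operators with the C-centring translation (1/2, 1/2, 0) added; unreduced
# numerators such as trans_num = [1,1,-1] over trans_den = [2,2,2] arise
# from that addition.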
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
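# Face-centred groups carry four centring translations, (0,0,0),
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), so 'F m m m' lists each of its
# eight point operations once per translation, 32 operators in total.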
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
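# Body-centred groups duplicate their operators with the I-centring
# translation (1/2, 1/2, 1/2): the second block of eight 'I m m m'
# operators is exactly the first block shifted by (1/2, 1/2, 1/2).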
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
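
# Note: 'P 41' is a screw-axis group; each quarter turn about z carries a
# translation of c/4, which is why its fourfold operations above appear
# with translations (0, 0, 1/4) and (0, 0, 3/4) and the half turn with
# (0, 0, 1/2).
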
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
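
# Note: the '-4' in 'P -4' denotes a fourfold rotoinversion: each
# nontrivial operation above is minus a quarter-turn matrix, e.g.
# N.array([0,1,0,-1,0,0,0,0,-1]) is -R(90 degrees about z), combining the
# rotation with inversion through the origin.
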
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
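
# Usage note (an assumption about how this table is consumed): every
# group is registered under two keys, so lookups by International Tables
# number and by Hermann-Mauguin symbol return the same object, e.g.
#     space_groups[82] is space_groups['I -4']   # -> True
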
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
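
# A sketch, under the same illustrative assumptions as apply_symmetry_op
# above, of how the I-centered duplication could be generated from the 8
# primitive operations: shifting a translation num/den by (1/2, 1/2, 1/2)
# componentwise gives (2*num + den) / (2*den). This form is not reduced
# to lowest terms, so it can differ in representation (though not in
# value) from the entries written out in this table.
def add_I_centering(op):
    rot, trans_num, trans_den = op
    return (rot, 2 * trans_num + trans_den, 2 * trans_den)

# Space group 108 (I 4 c m): 16 symmetry operations (I-centered)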
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
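
# Space group 109 (I 41 m d): 16 symmetry operations (I-centered)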
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
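
# Space group 110 (I 41 c d): 16 symmetry operations (I-centered)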
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
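
# Space group 111 (P -4 2 m): 8 symmetry operations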
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
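
# Space group 112 (P -4 2 c): 8 symmetry operations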
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
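
# Space group 113 (P -4 21 m): 8 symmetry operations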
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
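
# Space group 114 (P -4 21 c): 8 symmetry operations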
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
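
# Space group 115 (P -4 m 2): 8 symmetry operations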
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
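
# Space group 116 (P -4 c 2): 8 symmetry operations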
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
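
# Space group 117 (P -4 b 2): 8 symmetry operations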
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
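
# Space group 118 (P -4 n 2): 8 symmetry operations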
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
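
# Space group 119 (I -4 m 2): 16 symmetry operations (I-centered)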
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
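
# Space group 120 (I -4 c 2): 16 symmetry operations (I-centered)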
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
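
# Space group 121 (I -4 2 m): 16 symmetry operations (I-centered)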
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
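
# Space group 122 (I -4 2 d): 16 symmetry operations (I-centered)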
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
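
# Space group 123 (P 4/m m m): 16 symmetry operations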
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
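
# Space group 124 (P 4/m c c): 16 symmetry operations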
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
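
# Space group 125 (P 4/n b m, origin choice 2): 16 symmetry operations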
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
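
# Space group 126 (P 4/n n c, origin choice 2): 16 symmetry operations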
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
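
# Space group 127 (P 4/m b m): 16 symmetry operations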
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
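
# Space group 128 (P 4/m n c): 16 symmetry operations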
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
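
# Space group 129 (P 4/n m m, origin choice 2): 16 symmetry operations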
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
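
# Space group 130 (P 4/n c c, origin choice 2): 16 symmetry operations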
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
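
# Space group 131 (P 42/m m c): 16 symmetry operations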
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
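# The 32 operations of 'I 4/m m m' are the 16 primitive ones followed by the
# same 16 shifted by the body-centering translation (1/2, 1/2, 1/2). In the
# stored data each component fraction is reduced but not wrapped modulo the
# lattice, which is why translations such as (1/2, 1/2, 1) appear below for
# 'I 4/m c m'. A sketch of that expansion, assuming N is NumPy (helper name
# illustrative; unlike the stored data, this version leaves the resulting
# fractions unreduced):
def _add_centering(ops, cen_num, cen_den):
    # Add one centering translation to every (rot, num, den) operation by
    # cross-multiplying the two fractions elementwise.
    return [(rot, tn * cen_den + N.asarray(cen_num) * td, td * cen_den)
            for rot, tn, td in ops]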
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
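# From 'P 3' onward the trigonal groups are given on hexagonal axes, where
# the 3-fold rotation is an integer (non-orthogonal) matrix. A quick sanity
# check, assuming N is NumPy: cubing the generator below yields the identity.
_r3 = N.array([0,-1,0,1,-1,0,0,0,1])
_r3.shape = (3, 3)
assert (N.dot(N.dot(_r3, _r3), _r3) == N.identity(3)).all()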
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
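# 'R 3 :H' is the rhombohedral lattice in its hexagonal (obverse) setting:
# the three primitive operations are repeated with the centering
# translations (1/3, 2/3, 2/3) and (2/3, 1/3, 1/3). With the hypothetical
# _add_centering helper sketched above, the nine operations could be rebuilt
# from the first three:
#
#   base = transformations[:3]
#   ops = (base
#          + _add_centering(base, N.array([1,2,2]), N.array([3,3,3]))
#          + _add_centering(base, N.array([2,1,1]), N.array([3,3,3])))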
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
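# All six operations of 'P 3 2 1' have zero translation, so their rotation
# parts must close under matrix multiplication. An illustrative checker,
# assuming N is NumPy (defined here but not called at import time):
def _rotations_close_under_product(ops):
    # True if the product of any two rotation parts is again in the set.
    mats = [rot for rot, _, _ in ops]
    keys = set(tuple(m.ravel()) for m in mats)
    return all(tuple(N.dot(a, b).ravel()) in keys for a in mats for b in mats)
# e.g., on the list just built: _rotations_close_under_product(transformations)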
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
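# Space group 156 (P 3 m 1): 6 operations.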
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
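# Space group 157 (P 3 1 m): 6 operations.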
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
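# Space group 158 (P 3 c 1): 6 operations; the reflections carry the c-glide
# translation (0,0,1/2).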
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
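# Space group 159 (P 3 1 c): 6 operations; glide translation (0,0,1/2).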
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
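# Space group 160 (R 3 m, hexagonal axes): 18 operations with centering
# translations (1/3,2/3,2/3) and (2/3,1/3,1/3).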
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
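# Space group 161 (R 3 c, hexagonal axes): 18 operations; the c-glide
# component (0,0,1/2) combines with the centering translations.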
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
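# Space group 162 (P -3 1 m): 12 operations.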
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
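# Space group 163 (P -3 1 c): 12 operations.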
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
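# Space group 164 (P -3 m 1): 12 operations.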
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
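# Space group 165 (P -3 c 1): 12 operations.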
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
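# Space group 166 (R -3 m, hexagonal axes): 36 operations
# (12 point-group operations x 3 centering translations).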
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
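# Space group 167 (R -3 c, hexagonal axes): 36 operations.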
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
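# Space group 168 (P 6): 6 operations.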
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
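# Space group 169 (P 61): 6 operations; 6_1 screw axis
# (c translations in multiples of 1/6).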
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
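# Space group 170 (P 65): 6 operations; 6_5 screw axis.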
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
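# Space group 171 (P 62): 6 operations; 6_2 screw axis.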
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
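# Space group 172 (P 64): 6 operations; 6_4 screw axis.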
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
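# Space group 173 (P 63): 6 operations; 6_3 screw axis.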
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
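# Space group 174 (P -6): 6 operations.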
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
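# Space group 175 (P 6/m): 12 operations.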
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
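# Space group 176 (P 63/m): 12 operations.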
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
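# Space group 177 (P 6 2 2): 12 operations.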
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
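# Space group 178 (P 61 2 2): 12 operations.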
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
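# Space group 179 (P 65 2 2): 12 operations.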
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
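# Space group 180 (P 62 2 2): 12 operations.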
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
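# Space group 181 (P 64 2 2): 12 operations.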
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
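# Space group 182 (P 63 2 2): 12 operations.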
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Software dvae-speech
Copyright Inria
Year 2020
Contact : <EMAIL>
License agreement in LICENSE.txt
The main Python file for model training, testing and performance evaluation; see README.md for further information
"""
import os
import shutil
import socket
import datetime
import pickle
import numpy as np
import torch
import librosa
import soundfile as sf
import speechmetrics
import matplotlib.pyplot as plt
from .utils import myconf, get_logger, EvalMetrics, SpeechSequencesFull, SpeechSequencesRandom
from .model import build_VAE, build_DKF, build_KVAE, build_STORN, build_VRNN, build_SRNN, build_RVAE, build_DSAE
class LearningAlgorithm():
"""
    Base class for model building, including:
        - reading the parameters common to the different models
        - defining the data loaders
        - defining the loss function as a class member
"""
def __init__(self, config_file='config_default.ini'):
# Load config parser
self.config_file = config_file
if not os.path.isfile(self.config_file):
raise ValueError('Invalid config file path')
self.cfg = myconf()
self.cfg.read(self.config_file)
self.model_name = self.cfg.get('Network', 'name')
self.dataset_name = self.cfg.get('DataFrame', 'dataset_name')
# Get host name and date
self.hostname = socket.gethostname()
self.date = datetime.datetime.now().strftime("%Y-%m-%d-%Hh%M")
# Load STFT parameters
wlen_sec = self.cfg.getfloat('STFT', 'wlen_sec')
hop_percent = self.cfg.getfloat('STFT', 'hop_percent')
fs = self.cfg.getint('STFT', 'fs')
zp_percent = self.cfg.getint('STFT', 'zp_percent')
wlen = wlen_sec * fs
        wlen = int(np.power(2, np.ceil(np.log2(wlen))))  # round up to the next power of 2
        hop = int(hop_percent * wlen)
nfft = wlen + zp_percent * wlen
win = torch.sin(torch.arange(0.5, wlen+0.5) / wlen * np.pi)
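        # Worked example (illustrative numbers, not taken from any config):
        # wlen_sec = 0.064 and fs = 16000 give wlen = 1024, already a power
        # of 2; hop_percent = 0.25 then gives hop = 256 samples, and
        # zp_percent = 0 leaves nfft = wlen = 1024.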
STFT_dict = {}
STFT_dict['fs'] = fs
STFT_dict['wlen'] = wlen
STFT_dict['hop'] = hop
STFT_dict['nfft'] = nfft
STFT_dict['win'] = win
STFT_dict['trim'] = self.cfg.getboolean('STFT', 'trim')
self.STFT_dict = STFT_dict
# Load model parameters
self.use_cuda = self.cfg.getboolean('Training', 'use_cuda')
self.device = 'cuda' if torch.cuda.is_available() and self.use_cuda else 'cpu'
# Build model
self.build_model()
def build_model(self):
if self.model_name == 'VAE':
self.model = build_VAE(cfg=self.cfg, device=self.device)
elif self.model_name == 'DKF':
self.model = build_DKF(cfg=self.cfg, device=self.device)
elif self.model_name == 'KVAE':
self.model = build_KVAE(cfg=self.cfg, device=self.device)
elif self.model_name == 'STORN':
self.model = build_STORN(cfg=self.cfg, device=self.device)
elif self.model_name == 'VRNN':
self.model = build_VRNN(cfg=self.cfg, device=self.device)
elif self.model_name == 'SRNN':
self.model = build_SRNN(cfg=self.cfg, device=self.device)
elif self.model_name == 'RVAE':
self.model = build_RVAE(cfg=self.cfg, device=self.device)
elif self.model_name == 'DSAE':
self.model = build_DSAE(cfg=self.cfg, device=self.device)
def load_state_dict(self, state_dict_file):
self.model.load_state_dict(torch.load(state_dict_file, map_location=self.device))
def init_optimizer(self):
        # Load optimization settings
self.optimization = self.cfg.get('Training', 'optimization')
lr = self.cfg.getfloat('Training', 'lr')
# Init optimizer (Adam by default)
if self.model_name=='KVAE':
lr_tot = self.cfg.getfloat('Training', 'lr_tot')
if self.optimization == 'adam':
self.optimizer_vae = torch.optim.Adam(self.model.iter_vae, lr=lr)
self.optimizer_vae_kf = torch.optim.Adam(self.model.iter_vae_kf, lr=lr_tot)
self.optimizer_all = torch.optim.Adam(self.model.iter_all, lr=lr_tot)
else:
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
else:
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
def build_dataloader(self, train_data_dir, val_data_dir, sequence_len, batch_size, STFT_dict, use_random_seq=False):
# List all the data with certain suffix
data_suffix = self.cfg.get('DataFrame', 'suffix')
train_file_list = librosa.util.find_files(train_data_dir, ext=data_suffix)
val_file_list = librosa.util.find_files(val_data_dir, ext=data_suffix)
# Generate dataloader for pytorch
num_workers = self.cfg.getint('DataFrame', 'num_workers')
        shuffle_file_list = self.cfg.getboolean('DataFrame', 'shuffle_file_list')
        shuffle_samples_in_batch = self.cfg.getboolean('DataFrame', 'shuffle_samples_in_batch')
# Instantiate dataloader
if use_random_seq:
train_dataset = SpeechSequencesRandom(file_list=train_file_list, sequence_len=sequence_len,
STFT_dict=self.STFT_dict, shuffle=shuffle_file_list, name=self.dataset_name)
val_dataset = SpeechSequencesRandom(file_list=val_file_list, sequence_len=sequence_len,
STFT_dict=self.STFT_dict, shuffle=shuffle_file_list, name=self.dataset_name)
else:
train_dataset = SpeechSequencesFull(file_list=train_file_list, sequence_len=sequence_len,
STFT_dict=self.STFT_dict, shuffle=shuffle_file_list, name=self.dataset_name)
val_dataset = SpeechSequencesFull(file_list=val_file_list, sequence_len=sequence_len,
STFT_dict=self.STFT_dict, shuffle=shuffle_file_list, name=self.dataset_name)
        train_num = len(train_dataset)
        val_num = len(val_dataset)
# Create dataloader
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=shuffle_samples_in_batch,
num_workers = num_workers)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size,
shuffle=shuffle_samples_in_batch,
num_workers = num_workers)
return train_dataloader, val_dataloader, train_num, val_num
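    # Usage sketch (hypothetical paths and sizes):
    # loaders = self.build_dataloader('./data/train', './data/val',
    #                                 sequence_len=50, batch_size=32,
    #                                 STFT_dict=self.STFT_dict)
    # train_dataloader, val_dataloader, train_num, val_num = loaders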
def get_basic_info(self):
basic_info = []
basic_info.append('HOSTNAME: ' + self.hostname)
basic_info.append('Time: ' + self.date)
basic_info.append('Device for training: ' + self.device)
if self.device == 'cuda':
            basic_info.append('CUDA version: {}'.format(torch.version.cuda))
basic_info.append('Model name: {}'.format(self.model_name))
return basic_info
def train(self):
# Set module.training = True
self.model.train()
torch.autograd.set_detect_anomaly(True)
# Create directory for results
saved_root = self.cfg.get('User', 'saved_root')
z_dim = self.cfg.getint('Network','z_dim')
tag = self.cfg.get('Network', 'tag')
filename = "{}_{}_{}_z_dim={}".format(self.dataset_name, self.date, tag, z_dim)
save_dir = os.path.join(saved_root, filename)
if not(os.path.isdir(save_dir)):
os.makedirs(save_dir)
# Save the model configuration
save_cfg = os.path.join(save_dir, 'config.ini')
shutil.copy(self.config_file, save_cfg)
# Create logger
log_file = os.path.join(save_dir, 'log.txt')
logger_type = self.cfg.getint('User', 'logger_type')
logger = get_logger(log_file, logger_type)
        # Print basic information
for log in self.get_basic_info():
logger.info(log)
        logger.info('In this experiment, results will be saved in: ' + save_dir)
        # Print model information (optional)
if self.cfg.getboolean('User', 'print_model'):
for log in self.model.get_info():
logger.info(log)
# Init optimizer
self.init_optimizer()
batch_size = self.cfg.getint('Training', 'batch_size')
sequence_len = self.cfg.getint('DataFrame','sequence_len')
use_random_seq = self.cfg.getboolean('DataFrame','use_random_seq')
# Create data loader
train_data_dir = self.cfg.get('User', 'train_data_dir')
val_data_dir = self.cfg.get('User', 'val_data_dir')
loader = self.build_dataloader(train_data_dir=train_data_dir, val_data_dir=val_data_dir,
sequence_len=sequence_len, batch_size=batch_size,
STFT_dict=self.STFT_dict, use_random_seq=use_random_seq)
train_dataloader, val_dataloader, train_num, val_num = loader
log_message = 'Training samples: {}'.format(train_num)
logger.info(log_message)
log_message = 'Validation samples: {}'.format(val_num)
logger.info(log_message)
        # KVAE needs a scheduled training procedure
if self.model_name == 'KVAE':
self.train_kvae(logger, save_dir, train_dataloader, val_dataloader, train_num, val_num)
else:
self.train_normal(logger, save_dir, train_dataloader, val_dataloader, train_num, val_num)
def train_normal(self, logger, save_dir, train_dataloader, val_dataloader, train_num, val_num):
# Load training parameters
epochs = self.cfg.getint('Training', 'epochs')
early_stop_patience = self.cfg.getint('Training', 'early_stop_patience')
save_frequency = self.cfg.getint('Training', 'save_frequency')
        # Create numpy arrays to track the losses
train_loss = np.zeros((epochs,))
val_loss = np.zeros((epochs,))
train_recon = np.zeros((epochs,))
train_KLD = np.zeros((epochs,))
val_recon = np.zeros((epochs,))
val_KLD = np.zeros((epochs,))
best_val_loss = np.inf
cpt_patience = 0
cur_best_epoch = epochs
best_state_dict = self.model.state_dict()
# Define optimizer (might use different training schedule)
optimizer = self.optimizer
# Train with mini-batch SGD
for epoch in range(epochs):
start_time = datetime.datetime.now()
# Batch training
for batch_idx, batch_data in enumerate(train_dataloader):
batch_data = batch_data.to(self.device)
recon_batch_data = self.model(batch_data, compute_loss=True)
loss_tot, loss_recon, loss_KLD = self.model.loss
optimizer.zero_grad()
loss_tot.backward()
optimizer.step()
train_loss[epoch] += loss_tot.item()
train_recon[epoch] += loss_recon.item()
train_KLD[epoch] += loss_KLD.item()
# Validation
for batch_idx, batch_data in enumerate(val_dataloader):
batch_data = batch_data.to(self.device)
recon_batch_data = self.model(batch_data, compute_loss=True)
loss_tot, loss_recon, loss_KLD = self.model.loss
val_loss[epoch] += loss_tot.item()
val_recon[epoch] += loss_recon.item()
val_KLD[epoch] += loss_KLD.item()
# Loss normalization
train_loss[epoch] = train_loss[epoch]/ train_num
val_loss[epoch] = val_loss[epoch] / val_num
train_recon[epoch] = train_recon[epoch] / train_num
train_KLD[epoch] = train_KLD[epoch]/ train_num
val_recon[epoch] = val_recon[epoch] / val_num
val_KLD[epoch] = val_KLD[epoch] / val_num
            # Early stopping patience
if val_loss[epoch] < best_val_loss:
best_val_loss = val_loss[epoch]
cpt_patience = 0
best_state_dict = self.model.state_dict()
cur_best_epoch = epoch
else:
cpt_patience += 1
# Training time
end_time = datetime.datetime.now()
            interval = (end_time - start_time).total_seconds() / 60
log_message = 'Epoch: {} train loss: {:.4f} val loss {:.4f} training time {:.2f}m'.format(epoch, train_loss[epoch], val_loss[epoch], interval)
logger.info(log_message)
            # Stop training if early stopping triggers
if cpt_patience == early_stop_patience:
logger.info('Early stop patience achieved')
break
# Save model parameters regularly
if epoch % save_frequency == 0:
save_file = os.path.join(save_dir, self.model_name + '_epoch' + str(cur_best_epoch) + '.pt')
torch.save(self.model.state_dict(), save_file)
        # Save the final weights of the network with the best validation loss
train_loss = train_loss[:epoch+1]
val_loss = val_loss[:epoch+1]
train_recon = train_recon[:epoch+1]
train_KLD = train_KLD[:epoch+1]
val_recon = val_recon[:epoch+1]
val_KLD = val_KLD[:epoch+1]
save_file = os.path.join(save_dir, self.model_name + '_final_epoch' + str(cur_best_epoch) + '.pt')
torch.save(best_state_dict, save_file)
# Save the training loss and validation loss
loss_file = os.path.join(save_dir, 'loss_model.pckl')
with open(loss_file, 'wb') as f:
pickle.dump([train_loss, val_loss, train_recon, train_KLD, val_recon, val_KLD], f)
        # Save the loss figures (retrieve the tag used in the file names)
        tag = self.cfg.get('Network', 'tag')
        plt.clf()
fig = plt.figure(figsize=(8,6))
plt.rcParams['font.size'] = 12
plt.plot(train_loss, label='training loss')
plt.plot(val_loss, label='validation loss')
plt.legend(fontsize=16, title=self.model_name, title_fontsize=20)
plt.xlabel('epochs', fontdict={'size':16})
plt.ylabel('loss', fontdict={'size':16})
fig_file = os.path.join(save_dir, 'loss_{}.png'.format(tag))
plt.savefig(fig_file)
plt.clf()
fig = plt.figure(figsize=(8,6))
plt.rcParams['font.size'] = 12
plt.plot(train_recon, label='Reconstruction')
plt.plot(train_KLD, label='KL Divergence')
plt.legend(fontsize=16, title='{}: Training'.format(self.model_name), title_fontsize=20)
plt.xlabel('epochs', fontdict={'size':16})
plt.ylabel('loss', fontdict={'size':16})
fig_file = os.path.join(save_dir, 'loss_train_{}.png'.format(tag))
plt.savefig(fig_file)
plt.clf()
fig = plt.figure(figsize=(8,6))
plt.rcParams['font.size'] = 12
plt.plot(val_recon, label='Reconstruction')
plt.plot(val_KLD, label='KL Divergence')
plt.legend(fontsize=16, title='{}: Validation'.format(self.model_name), title_fontsize=20)
plt.xlabel('epochs', fontdict={'size':16})
plt.ylabel('loss', fontdict={'size':16})
fig_file = os.path.join(save_dir, 'loss_val_{}.png'.format(tag))
plt.savefig(fig_file)
def train_kvae(self, logger, save_dir, train_dataloader, val_dataloader, train_num, val_num):
# Load training parameters
epochs = self.cfg.getint('Training', 'epochs')
early_stop_patience = self.cfg.getint('Training', 'early_stop_patience')
save_frequency = self.cfg.getint('Training', 'save_frequency')
scheduler_training = self.cfg.getboolean('Training', 'scheduler_training')
only_vae_epochs = self.cfg.getint('Training', 'only_vae_epochs')
kf_update_epochs = self.cfg.getint('Training', 'kf_update_epochs')
        # Create numpy arrays to track the losses
train_loss = np.zeros((epochs,))
val_loss = np.zeros((epochs,))
train_vae = np.zeros((epochs,))
train_lgssm = np.zeros((epochs,))
val_vae = np.zeros((epochs,))
        val_lgssm = np.zeros((epochs,))
"""
The storage and manipulation of probability distributions are fundamental to the operation of ``uravu`` and Bayesian inference.
The :py:class:`~uravu.distribution.Distribution` class oversees these operations.
"""
# Copyright (c) <NAME>
# Distributed under the terms of the MIT License
# author: <NAME>
import numpy as np
from scipy.stats import normaltest
from uravu.kde import gaussian_kde
from scipy.optimize import minimize
class Distribution:
"""
In addition to storage of the probability distribution, this class allows for some basic analysis, such as determination of normality.
Attributes:
samples (:py:attr:`array_like`): Samples in the distribution.
name (:py:attr:`str`): Distribution name.
ci_points (:py:attr:`array_like`): The percentiles at which confidence intervals should be found.
normal (:py:attr:`bool`): Are the samples normally distributed?
kde (:py:class:`scipy.stats.kde.gaussian_kde`): Kernel density approximation for the distribution.
Args:
samples (:py:attr:`array_like`): Sample for the distribution.
name (:py:attr:`str`, optional): A name to identify the distribution. Default is :py:attr:`'Distribution'`.
ci_points (:py:attr:`array_like`, optional): The two percentiles at which confidence intervals should be found. Default is :py:attr:`[2.5, 97.5]` (a 95 % confidence interval).
.. _FAQ: ./faq.html
"""
def __init__(self, samples, name="Distribution", ci_points=None):
"""
Initialisation function for a :py:class:`~uravu.distribution.Distribution` object.
"""
self.name = name
self.samples = np.array([])
if ci_points is None:
self.ci_points = np.array([2.5, 97.5])
else:
if len(ci_points) != 2:
raise ValueError("The ci_points must be an array of length two.")
self.ci_points = np.array(ci_points)
self.normal = False
self.add_samples(np.array(samples))
@property
def size(self):
"""
Get the number of samples in the distribution.
Returns:
:py:attr:`int`: Number of samples.
"""
return self.samples.size
def check_normality(self):
"""
Uses a :func:`scipy.stats.normaltest()` to evaluate if samples are normally distributed and updates the :py:attr:`~uravu.distribution.Distribution.normal` attribute.
"""
alpha = 0.05
test_samples = self.samples
if self.size > 500:
test_samples = np.random.choice(self.samples, size=500)
p_value = normaltest(test_samples)[1]
if p_value > alpha:
self.normal = True
else:
self.normal = False
def pdf(self, x):
"""
Get the probability density function for the distribution.
Args:
x (:py:attr:`float`): Value to return probability of.
Return:
:py:attr:`float`: Probability.
"""
return self.kde.pdf(x)
def logpdf(self, x):
"""
Get the natural log probability density function for the distribution.
Args:
x (:py:attr:`float`): Value to return natural log probability of.
Return:
:py:attr:`float`: Natural log probability.
"""
return self.kde.logpdf(x)
def negative_pdf(self, x):
"""
Get the negative of the probability density function for the distribution.
Args:
x (:py:attr:`float`): Value to return negative probability of.
Return:
:py:attr:`float`: Negative probability.
"""
return -self.kde.pdf(x)
@property
def dist_max(self):
"""
Get the value that maximises the distribution. If no :py:attr:`kde` has been created (for example if the distribution has fewer than 8 values) the median is returned.
        Returns:
:py:attr:`float`: Most likely value.
"""
try:
return minimize(self.negative_pdf, x0=[self.n]).x
except AttributeError:
return self.n
@property
def min(self):
"""
Get sample minimum.
Returns:
:py:attr:`float`: Sample minimum.
"""
return self.samples.min()
@property
def max(self):
"""
Get sample maximum.
Returns:
:py:attr:`float`: Sample maximum.
"""
return self.samples.max()
@property
def n(self):
"""
Get the median value of the distribution (for a normal distribution this is the same as the mean).
Returns:
:py:attr:`float`: Median value.
"""
return np.percentile(self.samples, [50])[0]
@property
def s(self):
"""
Get the standard deviation of the distribution. For a non-normal distribution, this will return :py:attr:`None`.
Returns:
:py:attr:`float` or :py:attr:`None`: Standard deviation of the distribution.
"""
if self.normal:
            return np.std(self.samples, ddof=1)
        return None
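# Usage sketch (added; values are hypothetical, and `add_samples` is assumed
# to store the samples and build the KDE once enough points are available):
# >>> d = Distribution(np.random.normal(0.0, 1.0, size=1000))
# >>> d.n  # median of the samples
# >>> d.s  # standard deviation if the samples look normal, else None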
import numpy as np
import lazy_property
import abc
from scipy.spatial import ConvexHull
import cvxpy
import pandas as pd
from mb_api import environments
from mb_api import auction_data
class DimensionlessCollusionMetrics:
def __init__(self, deviations):
self._deviations = ordered_deviations(deviations)
@lazy_property.LazyProperty
def equilibrium_index(self):
return np.where(self._deviations == 0)[0][0]
@abc.abstractmethod
def __call__(self, env):
""""""
def _normalized_deviation_temptation(self, env):
payoffs = self._get_payoffs(env)
return np.max(payoffs) - payoffs[self.equilibrium_index]
def _get_payoffs(self, env):
beliefs, cost = env[:-1], env[-1]
return np.multiply(beliefs, 1 + self._deviations - cost)
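    # In words: with the equilibrium bid normalized to 1, a deviation by a
    # factor (1 + rho) wins with probability beliefs[i] and earns a margin
    # of (1 + rho - cost), so the temptation above compares each entry of
    # this payoff vector to its rho = 0 entry.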
def ordered_deviations(deviations):
return np.sort(list(set([0] + deviations)))
class IsNonCompetitive(DimensionlessCollusionMetrics):
def __call__(self, env):
return 1. - 1. * np.isclose(
self._normalized_deviation_temptation(env), .0)
class EfficientIsNonCompetitive(DimensionlessCollusionMetrics):
min_markup = 0
max_markup = .5
def __init__(self, deviations):
super().__init__(deviations)
def __call__(self, env):
beliefs = env[:-1]
cost_bounds = self._get_cost_bounds(beliefs)
consistent = self.is_consistent(cost_bounds)
return 1 - 1. * consistent
def _get_cost_bounds(self, beliefs):
d0 = beliefs[self.equilibrium_index]
return [self._cost_bound(d0, dn, rho) for dn, rho in
zip(beliefs, self._deviations)]
def cost_lower_bound(self, cost_bounds):
dev_bound = max(cost_bounds[:self.equilibrium_index]) if \
self.equilibrium_index > 0 else 0
return max(dev_bound, 1 / (1 + self.max_markup))
def cost_upper_bound(self, cost_bounds):
dev_bound = min(cost_bounds[self.equilibrium_index + 1:]) if \
self.equilibrium_index + 1 < len(cost_bounds) else 1
return min(dev_bound, 1 / (1 + self.min_markup))
def is_consistent(self, cost_bounds):
return self.cost_lower_bound(cost_bounds) <= \
self.cost_upper_bound(cost_bounds)
def _cost_bound(self, d0, dn, rho):
is_exception, return_value = self._is_exception(d0, dn, rho)
if is_exception:
return return_value
return (d0 - (1 + rho) * dn)/(d0 - dn)
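    # Derivation note (added): equating the equilibrium payoff d0 * (1 - c)
    # with the deviation payoff dn * (1 + rho - c) and solving for the cost
    # gives c = (d0 - (1 + rho) * dn) / (d0 - dn); deviations with rho < 0
    # turn this into a lower bound on the cost and rho > 0 into an upper
    # bound, which is how cost_lower_bound and cost_upper_bound split it.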
@staticmethod
def _is_exception(d0, dn, rho):
if -1e-8 <= rho < 0:
return True, 0
elif 0 < rho < 1e-8:
return True, 1
elif np.isclose(d0, 0) and np.isclose(dn, 0):
return True, 1 * (rho > 0)
elif np.isclose(dn, d0):
            return True, np.nan
elif rho < 0 and dn < d0:
return True, 0
else:
return False, None
class NormalizedDeviationTemptation(DimensionlessCollusionMetrics):
def __call__(self, env):
return self._normalized_deviation_temptation(env)
class DeviationTemptationOverProfits(DimensionlessCollusionMetrics):
def __call__(self, env):
cost = env[-1]
eq_belief = env[self.equilibrium_index]
if cost < 1:
return self._normalized_deviation_temptation(env) / (
eq_belief * (1 - cost))
else:
return 0
class ConvexProblem:
def __init__(self, metrics, beliefs, demands, tolerance,
moment_matrix, moment_weights):
self._metrics = np.array(metrics).reshape(-1, 1)
self._beliefs = np.array(beliefs)
self._demands = np.array(demands).reshape(-1, 1)
self._tolerance = tolerance
self._moment_matrix = moment_matrix
self._moment_weights = moment_weights
@lazy_property.LazyProperty
def variable(self):
return cvxpy.Variable((len(self._metrics), 1))
@lazy_property.LazyProperty
def constraints(self):
return self._is_distribution + self._moment_constraint
@property
def _is_distribution(self):
return [self.variable >= 0, cvxpy.sum(self.variable) == 1]
@property
def _moment_constraint(self):
delta = cvxpy.matmul(self._beliefs.T, self.variable) - \
self._demands
moment = 1e2 * cvxpy.matmul(self._moment_matrix, delta)
return [cvxpy.matmul(
self._moment_weights, cvxpy.square(moment)) <= 1e4 *
self._tolerance]
@lazy_property.LazyProperty
def objective(self):
return cvxpy.Minimize(
cvxpy.sum(cvxpy.multiply(self.variable, self._metrics)))
@lazy_property.LazyProperty
def problem(self):
return cvxpy.Problem(self.objective, self.constraints)
@lazy_property.LazyProperty
def solution(self):
return self.problem.solve()
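    # Note (added): the 1e2 factor inside the moment constraint and the
    # matching 1e4 factor on its right-hand side cancel exactly once the
    # moment is squared, so they only rescale the quadratic form for the
    # solver's benefit without changing the feasible set.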
class MinCollusionSolver:
_precision = .0001
_environment_cls = environments.Environment
_pbm_cls = ConvexProblem
def __init__(self, data, deviations, metric, plausibility_constraints,
tolerance=None, num_points=1e6, seed=0, project=False,
filter_ties=None, moment_matrix=None, moment_weights=None,
confidence_level=.95, enhanced_guesses=False):
self._data = data
self.metric = metric(deviations)
self._deviations = ordered_deviations(deviations)
self._constraints = plausibility_constraints
self._tolerance = None if tolerance is None else np.array(tolerance)
self._seed = seed
self._num_points = num_points
self._project = project
self._enhanced_guesses = enhanced_guesses
self._filter_ties = filter_ties
self._initial_guesses = self._environments_from_demand(21)
self._moment_matrix = moment_matrix if moment_matrix is not None \
else self.default_moment_matrix
self._moment_weights = moment_weights if moment_weights is not None \
else self.default_moment_weights
self._confidence_level = confidence_level
def _environments_from_demand(self, n):
if not self._enhanced_guesses:
return np.array([
list(self.demands) + [c] for c in np.linspace(0, 1, n)])
else:
return np.array(
[list(self.demands) + [c] for c in np.linspace(0, 1, n)]
+ self._winner_env + self._loser_env
)
@property
def _winner_env(self):
env = 1 * (np.array(self._deviations) <= 0)
return [[1] * (len(env) + 1), list(np.append(env, 0))]
@property
def _loser_env(self):
env_0 = 0 * (np.array(self._deviations))
env_0 = np.append(env_0, 1)
env_1 = 1 * (np.array(self._deviations) < 0)
env_1 = np.append(env_1, 1)
return [list(env_0), list(env_1)]
@property
def default_moment_matrix(self):
return auction_data.moment_matrix(len(self._deviations), 'level')
@property
def default_moment_weights(self):
return np.ones_like(self._deviations)
@property
def environment(self):
return self._environment_cls(
len(self._deviations),
constraints=self._constraints,
project_constraint=self._project,
initial_guesses=self._initial_guesses
)
@property
def epigraph_extreme_points(self):
env_perf = self._env_with_perf
interior_env_perf = self._get_interior_dimensions(env_perf)
return env_perf[ConvexHull(interior_env_perf).vertices, :]
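    # Note (added): any achievable pair of average beliefs and average
    # metric is a convex combination of the sampled points, so restricting
    # the optimization to the vertices of their convex hull prunes the
    # sample without changing the optimum.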
@property
def _env_with_perf(self):
env = self.environment.generate_environments(
num_points=self._num_points, seed=self._seed)
perf = np.apply_along_axis(self.metric, 1, env).reshape(-1, 1)
return np.append(env, perf, 1)
def _get_interior_dimensions(self, env_perf):
variability = np.std(env_perf, axis=0)
full_dimensions = variability > self._precision
return env_perf[:, full_dimensions]
@staticmethod
def belief_extreme_points(epigraph):
return epigraph[:, :-2]
@staticmethod
def metric_extreme_points(epigraph):
return epigraph[:, -1]
@lazy_property.LazyProperty
def demands(self):
return self.filtered_data.assemble_target_moments(self.deviations)
@property
def filtered_data(self) -> auction_data.AuctionData:
if self._filter_ties is not None:
return self._filter_ties(self._data)
return self._data
@property
def share_of_ties(self):
return 1. - self.filtered_data.df_bids.shape[0] / \
self._data.df_bids.shape[0]
@property
def problem(self):
epigraph = self.epigraph_extreme_points
return self._pbm_cls(
metrics=self.metric_extreme_points(epigraph),
beliefs=self.belief_extreme_points(epigraph),
demands=self.demands,
tolerance=self.tolerance,
moment_matrix=self._moment_matrix,
moment_weights=self._moment_weights
)
@property
def tolerance(self):
if self._tolerance is None:
self._tolerance = self._compute_tolerance()
tol = np.maximum(self._tolerance, 1e-8).reshape(-1, 1)
return tol if len(tol) > 1 else float(tol)
def _compute_tolerance(self):
distances = self._moment_distances
return np.percentile(distances, 100 * self._confidence_level,
axis=distances.ndim-1)
@property
def _moment_distances(self):
bootstrap_demand_sample = self.filtered_data.bootstrap_demand_sample(
self._deviations, num_samples=100)
target_demands = np.array(self.demands).reshape(1, -1)
delta = np.add(bootstrap_demand_sample, -target_demands)
        moments_delta = np.dot(self._moment_matrix, delta.T)
import numpy as np
a = (45/180) * np.pi
R = np.array(
[[np.cos(a), -np.sin(a), 0],
[np.sin(a), np.cos(a), 0],
[0, 0, 1]])
print("\nR = ")
print(R)
S = np.diag([1.0, 2.0, 3.0])
A = np.dot(np.dot(R, S), R.T)
print("\nA = ")
print(A)
evals, evecs = np.linalg.eig(A)
idx = np.argsort(np.abs(evals)) # smallest first
U = evecs[:, idx] # sort columns
D = np.diag(evals[idx])
assert np.allclose(np.abs(R), np.abs(U))
assert np.allclose(D, S)
assert np.allclose(A, np.dot(U, np.dot(D, U.T)))
a = -a
RR = np.array([[np.cos(a), -np.sin(a), 0],
               [np.sin(a), np.cos(a), 0],
               [0, 0, 1]])
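# Added sanity check: rotating by -a inverts the rotation about z, and
# rotation matrices are orthogonal, so RR should equal the transpose of R.
assert np.allclose(RR, R.T)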
"""
@brief test log(time=120s)
"""
import unittest
import warnings
import sys
from logging import getLogger
from contextlib import redirect_stdout
from io import StringIO
import numpy
import onnx
from scipy.sparse import coo_matrix, csr_matrix, SparseEfficiencyWarning
from scipy.special import ( # pylint: disable=E0611
expit as logistic_sigmoid, erf)
from scipy.spatial.distance import cdist
from onnx import TensorProto, __version__ as onnx_version
from onnx.helper import make_sparse_tensor, make_tensor
from onnx.defs import onnx_opset_version
from onnx.numpy_helper import from_array
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.texthelper import compare_module_version
from sklearn.utils.extmath import softmax
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxAbs, OnnxAdd, OnnxAnd,
OnnxArgMax_11, OnnxArgMax,
OnnxArgMin_11, OnnxArgMin,
OnnxBatchNormalization,
OnnxAcos, OnnxAcosh, OnnxAsin, OnnxAsinh, OnnxAtan, OnnxAtanh,
OnnxAveragePool,
OnnxCast, OnnxCeil, OnnxClip,
OnnxCompress,
OnnxConcat, OnnxConv, OnnxConvTranspose,
OnnxConstant, OnnxConstant_9, OnnxConstant_11,
OnnxConstant_12, OnnxConstant_13,
OnnxConstantOfShape,
OnnxCos, OnnxCosh,
OnnxCumSum,
OnnxDequantizeLinear,
OnnxDet, OnnxDiv,
OnnxDropout, OnnxDropout_7,
OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike,
OnnxFlatten, OnnxFloor,
OnnxGreater, OnnxGreaterOrEqual, OnnxGemm, OnnxGlobalAveragePool,
OnnxIdentity, OnnxIsNaN,
OnnxLess, OnnxLessOrEqual,
OnnxLog, OnnxLpNormalization,
OnnxMatMul, OnnxMax, OnnxMaxPool, OnnxMean, OnnxMin, OnnxMod, OnnxMul,
OnnxNeg, OnnxNot,
OnnxOr,
OnnxPad, OnnxPow,
OnnxQLinearConv, OnnxQuantizeLinear,
OnnxRange,
OnnxReciprocal,
OnnxReduceL1, OnnxReduceL2,
OnnxReduceLogSumExp, OnnxReduceMax, OnnxReduceMean, OnnxReduceMin,
OnnxReduceProd,
OnnxReduceSum, OnnxReduceSumApi11, OnnxReduceSum_11, OnnxReduceSum_1,
OnnxReduceSumSquare,
OnnxRelu, OnnxReshape,
OnnxRound,
OnnxScatterElements, OnnxShape, OnnxSlice, OnnxSigmoid, OnnxSign,
OnnxSin, OnnxSinh,
OnnxSize, OnnxSoftmax,
OnnxSplit, OnnxSplitApi11,
OnnxSqrt, OnnxSub, OnnxSum,
OnnxSqueeze, OnnxSqueezeApi11,
OnnxTan, OnnxTanh, OnnxTopK, OnnxTranspose,
OnnxUnsqueeze, OnnxUnsqueezeApi11
)
try:
from skl2onnx.algebra.onnx_ops import OnnxCelu
except ImportError:
OnnxCelu = None
try:
from skl2onnx.algebra.onnx_ops import OnnxBatchNormalization_14
except ImportError:
OnnxBatchNormalization_14 = None
from skl2onnx import __version__ as skl2onnx_version, __max_supported_opset__
from mlprodict.onnxrt import OnnxInference
from mlprodict.tools.asv_options_helper import (
get_opset_number_from_onnx, get_ir_version_from_onnx)
from mlprodict.onnxrt.validate.validate_python import validate_python_inference
from mlprodict.onnxrt.ops_cpu.op_batch_normalization import (
_batchnorm_test_mode, _batchnorm_training_mode)
from mlprodict.onnxrt.ops_cpu.op_average_pool import (
_get_output_shape, _pool, _get_pad_shape)
from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool
from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611,E0401
topk_element_min_double, topk_element_max_double,
topk_element_fetch_double,
topk_element_min_float, topk_element_max_float, topk_element_fetch_float,
topk_element_min_int64, topk_element_max_int64, topk_element_fetch_int64)
from mlprodict.onnxrt.ops_cpu.op_celu import _vcelu1, pycelu
from mlprodict.onnxrt.ops_cpu.op_topk import topk_sorted_implementation
from mlprodict.onnxrt.ops_cpu.op_pad import _pad_impl
from mlprodict.onnxrt.ops_cpu.op_max_pool import (
_pool_get_output_shape, _pool_impl)
from mlprodict.onnxrt.ops_cpu.op_dropout import _dropout
from mlprodict.onnxrt.ops_cpu._op_helper import proto2dtype
from mlprodict.onnx_tools.onnx2py_helper import (
guess_proto_dtype, _elem_type_as_str)
from mlprodict.tools.data_types import (
FloatTensorType, Int64TensorType, DoubleTensorType, StringTensorType,
Int32TensorType, BooleanTensorType, UInt8TensorType,
Int16TensorType, Int8TensorType, UInt16TensorType,
UInt32TensorType, UInt64TensorType, Float16TensorType)
from mlprodict.testing.test_utils.quantized_tensor import (
QuantizedTensor, QuantizedBiasTensor, test_qlinear_conv)
from mlprodict.onnxrt.ops_cpu.op_qlinear_conv_ import ( # pylint: disable=W0611,E0611,E0401
test_qgemm0, test_qgemm1)
from mlprodict.onnxrt.ops_cpu.op_constant import Constant_12, Constant_11, Constant_9
try:
numpy_str = numpy.str_
except ImportError:
numpy_str = str
try:
numpy_bool = numpy.bool_
except ImportError:
numpy_bool = bool
sparse_support = []
sparse_no_numpy = []
python_tested = []
def make_coo_matrix(*args, **kwargs):
    # The runtime indexes sparse tensors with int64, while scipy may
    # default to int32, so the indices are cast explicitly.
    coo = coo_matrix(*args, **kwargs)
    coo.row = coo.row.astype(numpy.int64)
    coo.col = coo.col.astype(numpy.int64)
    return coo
def wraplog():
# from datetime import datetime
def wrapper(fct):
def call_f(self):
# no = datetime.now()
# print('BEGIN %s' % fct.__name__)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", DeprecationWarning)
fct(self)
# print('DONE %s - %r' % (fct.__name__, datetime.now() - no))
return call_f
return wrapper
class TestOnnxrtPythonRuntime(ExtTestCase): # pylint: disable=R0904
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
if __name__ == "__main__":
import pprint
print('-----------')
pprint.pprint(sparse_support)
print('-----------')
pprint.pprint(sparse_no_numpy)
print('-----------')
pprint.pprint(
list(sorted({_.__name__ for _ in python_tested})))
print('-----------')
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
def test_opset_skl2onnx(self):
opset_mlprodict = get_opset_number_from_onnx()
opset_skl2onnx = __max_supported_opset__
self.assertGreater(opset_skl2onnx, opset_mlprodict)
def common_expected_shapes_types(self, oinf, inputs, got, onnx_cl, model_def,
raise_shape=False):
expected_types = oinf.infer_types()
self.assertEqual(set(got) & set(expected_types), set(got))
for k, v in got.items():
if expected_types[k] in (str, numpy.str_):
# Type mismatch: dtype('<U32') != <class 'str'>
continue
if v.dtype != expected_types[k]:
raise AssertionError(
"Type mismatch: %r != %r\nexpected_types=%r\ngot=%r"
"\n----\n%r" % (
v.dtype, expected_types[k], expected_types, got,
model_def))
try:
expected_shapes = oinf.infer_shapes()
self.assertEqual(set(got) & set(expected_shapes), set(got))
except RuntimeError as e:
if raise_shape:
raise e
warnings.warn("infer_shapes fails for operator %r." % onnx_cl)
res = oinf.infer_sizes(inputs)
self.assertIsInstance(res, dict)
@ignore_warnings(category=(RuntimeWarning, DeprecationWarning,
SparseEfficiencyWarning, PendingDeprecationWarning))
def common_test_onnxt_runtime_unary(self, onnx_cl, np_fct,
op_version=None,
outputs=None, debug=False,
do_sparse=True, raise_shape=False):
if op_version is None:
op_version = get_opset_number_from_onnx()
try:
onx = onnx_cl('X', output_names=['Y'], op_version=op_version)
except RuntimeError as e:
raise RuntimeError('onnx.opset={} op_version={}'.format(
get_opset_number_from_onnx(), op_version)) from e
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
model_def = onx.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version,
outputs=outputs)
if debug:
print(model_def)
python_tested.append(onnx_cl)
# python code
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': X.astype(numpy.float32)})
# no inplace
oinf = OnnxInference(model_def, inplace=False)
all_names = "\n".join(
"%s>=v%d" % (op.ops_.__class__.__name__,
op.ops_._schema.since_version) # pylint: disable=W0212
for op in oinf.sequence_)
if debug:
got = oinf.run({'X': X.astype(numpy.float32)},
verbose=1, fLOG=print)
else:
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.common_expected_shapes_types(
oinf, {'X': X.astype(numpy.float32)}, got, onnx_cl,
model_def, raise_shape=raise_shape)
try:
self.assertEqualArray(np_fct(X), got['Y'], decimal=5)
except AssertionError as e:
raise AssertionError(
'onnx.opset={} op_version={}\n--ONNX--\n{}\n--NAMES--\n{}'.format(
get_opset_number_from_onnx(), op_version, model_def,
all_names)) from e
# inplace
oinf = OnnxInference(model_def, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(np_fct(X), got['Y'], decimal=5)
# inplace2
onx2 = OnnxIdentity(
onnx_cl('X', op_version=op_version),
output_names=['Y'], op_version=op_version)
model_def2 = onx2.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version,
outputs=outputs)
oinf = OnnxInference(model_def2, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(np_fct(X), got['Y'], decimal=5)
# input inplace
expe = np_fct(X)
oinf = OnnxInference(model_def, input_inplace=True, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(expe, got['Y'], decimal=5)
# sparse
if do_sparse:
row = numpy.array([0, 0, 1, 3, 1])
col = numpy.array([0, 2, 1, 3, 1])
data = numpy.array([1, 1, 1, 1, 1])
X = make_coo_matrix((data, (row.astype(numpy.int64),
col.astype(numpy.int64))),
shape=(4, 4), dtype=numpy.float32)
try:
exp = np_fct(X)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((onnx_cl.__name__, op_version, e))
return
model_def_sparse = onx.to_onnx(
{'X': X.astype(numpy.float32)}, target_opset=op_version)
oinf = OnnxInference(
model_def_sparse, input_inplace=False, inplace=True)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualSparseArray(exp, got['Y'], decimal=5)
sparse_support.append(('UnOp', op_version, onnx_cl.__name__))
@ignore_warnings(category=(RuntimeWarning, DeprecationWarning,
SparseEfficiencyWarning, PendingDeprecationWarning))
def common_test_onnxt_runtime_binary(self, onnx_cl, np_fct,
dtype=numpy.float32,
op_version=None, debug=False,
raise_shape=False):
if op_version is None:
op_version = get_opset_number_from_onnx()
idi = numpy.identity(2, dtype=dtype)
onx = onnx_cl('X', idi, output_names=['Y'], op_version=op_version)
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float64)
model_def = onx.to_onnx({'X': X.astype(dtype)},
target_opset=op_version)
oinf = OnnxInference(model_def)
if debug:
got = oinf.run({'X': X.astype(dtype)}, verbose=1, fLOG=print)
else:
got = oinf.run({'X': X.astype(dtype)})
self.assertEqual(list(sorted(got)), ['Y'])
self.common_expected_shapes_types(
oinf, {'X': X.astype(dtype)}, got, onnx_cl, model_def,
raise_shape=raise_shape)
exp = np_fct(X, idi)
self.assertEqualArray(exp, got['Y'], decimal=5)
# python code
python_tested.append(onnx_cl)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': X.astype(dtype)})
# sparse
idi = make_coo_matrix(numpy.identity(2)).astype(numpy.float32)
X = make_coo_matrix(numpy.array(
[[0, 2], [3, -4]], dtype=numpy.float32))
try:
exp = np_fct(X, idi)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((onnx_cl.__name__, op_version, e))
return
onx = onnx_cl('X', idi, output_names=['Y'], op_version=op_version)
model_def_sparse = onx.to_onnx({'X': X}, target_opset=op_version)
try:
oinf = OnnxInference(
model_def_sparse, input_inplace=False, inplace=True)
except RuntimeError as e:
raise RuntimeError(
"Unable to load sparse model\n{}".format(
model_def_sparse)) from e
if debug:
got = oinf.run({'X': X}, verbose=1, fLOG=print)
else:
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
if isinstance(exp, (coo_matrix, csr_matrix)):
self.assertEqualSparseArray(exp, got['Y'], decimal=5)
elif isinstance(exp, numpy.ndarray):
self.assertEqualArray(exp, got['Y'], decimal=5)
else:
self.assertEqual(exp, got['Y'])
sparse_support.append(('BinOp', op_version, onnx_cl.__name__))
@wraplog()
def test_onnxt_runtime_abs(self):
self.common_test_onnxt_runtime_unary(OnnxAbs, numpy.abs)
@wraplog()
def test_onnxt_runtime_abs_debug(self):
f = StringIO()
with redirect_stdout(f):
self.common_test_onnxt_runtime_unary(
OnnxAbs, numpy.abs, debug=True)
@wraplog()
def test_onnxt_runtime_acos(self):
self.common_test_onnxt_runtime_unary(OnnxAcos, numpy.arccos)
@wraplog()
def test_onnxt_runtime_acosh(self):
self.common_test_onnxt_runtime_unary(OnnxAcosh, numpy.arccosh)
@wraplog()
def test_onnxt_runtime_add(self):
self.common_test_onnxt_runtime_binary(OnnxAdd, numpy.add)
@wraplog()
def test_onnxt_runtime_and(self):
self.common_test_onnxt_runtime_binary(
OnnxAnd, numpy.logical_and, dtype=numpy.bool_)
@wraplog()
def test_onnxt_runtime_argmax(self):
opsets = list(range(11, get_opset_number_from_onnx() + 1))
opsets = ['11only'] + opsets
for opset in opsets:
with self.subTest(opset=opset):
X = numpy.array([[2, 1], [0, 1]], dtype=float)
if opset == '11only':
clarg = OnnxArgMax_11
opset = 11
br = True
else:
clarg = OnnxArgMax
br = False
onx = clarg('X', output_names=['Y'], keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmax(
X, axis=0), got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X}, got, clarg, model_def)
if br:
continue
oinfpy = OnnxInference(
model_def, runtime="python", inplace=True)
validate_python_inference(
oinfpy, {'X': X.astype(numpy.float32)})
onx = OnnxArgMax('X', output_names=['Y'], axis=1, keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmax(X, axis=1).ravel(),
got['Y'].ravel())
onx = OnnxArgMax('X', output_names=['Y'], axis=1, keepdims=1,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmax(X, axis=1).ravel(),
got['Y'].ravel())
# sparse
X = make_coo_matrix(X, dtype=numpy.float32)
try:
exp = numpy.argmax(X, axis=1)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((OnnxArgMax.__name__, None, e))
return
model_def_sparse = onx.to_onnx({'X': X},
target_opset=opset)
oinf = OnnxInference(model_def_sparse, input_inplace=False)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(exp, got['Y'], decimal=5)
X = numpy.array([[2, 1], [0, 1]], dtype=float)
sparse_support.append(('UnOp', None, OnnxArgMax.__name__))
python_tested.append(OnnxArgMax)
@unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
@wraplog()
def test_onnxt_runtime_argmax_12(self):
self.assertGreater(onnx_opset_version(), 12)
from skl2onnx.algebra.onnx_ops import OnnxArgMax_12 # pylint: disable=E0611
X = numpy.array([[2, 2, 1], [0, 1, 1]], dtype=float)
onx = OnnxArgMax_12('X', output_names=['Y'], keepdims=0, axis=1,
select_last_index=1, op_version=12)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.array([1, 2], dtype=numpy.int64),
got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxArgMax_12, model_def)
@wraplog()
def test_onnxt_runtime_argmin(self):
opsets = list(range(11, get_opset_number_from_onnx() + 1))
opsets = ['11only'] + opsets
for opset in opsets:
with self.subTest(opset=opset):
if opset == '11only':
clarg = OnnxArgMin_11
opset = 11
br = True
else:
clarg = OnnxArgMin
br = False
X = numpy.array([[2, 1], [0, 1]], dtype=float)
onx = clarg('X', output_names=['Y'], keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmin(
X, axis=0), got['Y'], decimal=5)
if br:
continue
oinfpy = OnnxInference(
model_def, runtime="python", inplace=True)
validate_python_inference(
oinfpy, {'X': X.astype(numpy.float32)})
self.common_expected_shapes_types(
oinfpy, {'X': X.astype(numpy.float32)},
got, clarg, model_def)
onx = OnnxArgMin('X', output_names=['Y'], axis=1, keepdims=0,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmin(X, axis=1).ravel(),
got['Y'].ravel())
onx = OnnxArgMin('X', output_names=['Y'], axis=1, keepdims=1,
op_version=opset)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.argmin(X, axis=1).ravel(),
got['Y'].ravel())
# sparse
X = make_coo_matrix(X, dtype=numpy.float32)
try:
exp = numpy.argmin(X, axis=1)
except (TypeError, NotImplementedError, ValueError) as e:
# Function np_fct does not work on sparse data.
sparse_no_numpy.append((OnnxArgMin.__name__, None, e))
return
model_def_sparse = onx.to_onnx({'X': X}, target_opset=opset)
oinf = OnnxInference(model_def_sparse, input_inplace=False)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(exp, got['Y'], decimal=5)
sparse_support.append(('UnOp', None, OnnxArgMin.__name__))
python_tested.append(OnnxArgMin)
@unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
@wraplog()
def test_onnxt_runtime_argmin_12(self):
        self.assertGreaterEqual(onnx_opset_version(), 12)
from skl2onnx.algebra.onnx_ops import OnnxArgMin_12 # pylint: disable=E0611
X = numpy.array([[2, 1, 1], [0, 0, 1]], dtype=float)
onx = OnnxArgMin_12('X', output_names=['Y'], keepdims=0, axis=1,
select_last_index=1, op_version=12)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64),
got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxArgMin_12, model_def)
@wraplog()
def test_onnxt_runtime_asin(self):
self.common_test_onnxt_runtime_unary(OnnxAsin, numpy.arcsin)
@wraplog()
def test_onnxt_runtime_asinh(self):
self.common_test_onnxt_runtime_unary(OnnxAsinh, numpy.arcsinh)
@wraplog()
def test_onnxt_runtime_atan(self):
self.common_test_onnxt_runtime_unary(OnnxAtan, numpy.arctan)
@wraplog()
def test_onnxt_runtime_atanh(self):
self.common_test_onnxt_runtime_unary(OnnxAtanh, numpy.arctanh)
@wraplog()
def test_onnxt_runtime_atan2(self):
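        """Checks the branchless ``atan2`` reference formula against
        ``numpy.arctan2``; no ONNX graph is built in this test."""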
test_pairs = [[y, x]
for x in [3., -4., 0., -1., 1.]
for y in [5., -6., 0., -1., 1.]]
y_val = numpy.array([y for y, x in test_pairs], dtype=numpy.float32)
x_val = numpy.array([x for y, x in test_pairs], dtype=numpy.float32)
def atan2(y, x):
# size: 100000
# timeit arctan: 0.00205
# timeit arctan2: 0.00361
# timeit atan2: 0.00599
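            # Branchless arctan2: sx ** 2 cancels the arctan term when x == 0
            # (the (1 - sx ** 2) offset also avoids the division by zero),
            # while pi_part adds the +/-pi quadrant correction derived from
            # the signs of x and y.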
sx = numpy.sign(x)
sy = numpy.sign(y)
pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi / 2)
atan_part = numpy.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
return atan_part + pi_part
self.assertEqualArray(
numpy.arctan2(y_val, x_val), atan2(y_val, x_val), decimal=5)
def _expect_average_pool(self, node, inputs, outputs, opset=None):
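        """Wraps a single AveragePool node into a minimal ONNX graph, runs it
        with OnnxInference and compares the result with *outputs*."""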
if opset is None:
opset = get_opset_number_from_onnx()
ginputs = [
onnx.helper.make_tensor_value_info(
                node.input[0], TensorProto.FLOAT, []),  # pylint: disable=E1101
]
goutputs = [
onnx.helper.make_tensor_value_info(
                node.output[0], TensorProto.FLOAT, []),  # pylint: disable=E1101
]
model_def = onnx.helper.make_model(
opset_imports=[onnx.helper.make_operatorsetid('', opset)],
graph=onnx.helper.make_graph(
name='test_average_pool', inputs=ginputs, outputs=goutputs,
nodes=[node]))
oinf = OnnxInference(model_def)
got = oinf.run({n: v for n, v in zip(node.input, inputs)})
self.assertEqual(len(got), 1)
self.assertEqualArray(outputs[0], got['y'])
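    # In the AveragePool tests below, SAME_UPPER puts the extra padding
    # row/column at the bottom/right (pad_top = total // 2) and SAME_LOWER at
    # the top/left. Padding with NaN marks the cells the reference _pool
    # helper must leave out of the average; the count_include_pad=1 case
    # pads with zeros and counts them instead.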
@wraplog()
def test_onnxt_runtime_average_pool(self):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2, 2], auto_pad='SAME_UPPER')
x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = (2, 2)
strides = (1, 1)
out_shape = _get_output_shape(
'SAME_UPPER', x_shape[2:], kernel_shape, strides)
pad_shape = _get_pad_shape(
'SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)
pad_top = pad_shape[0] // 2
pad_bottom = pad_shape[0] - pad_top
pad_left = pad_shape[1] // 2
pad_right = pad_shape[1] - pad_left
padded = numpy.pad(
x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
mode='constant', constant_values=numpy.nan)
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[3, 3], pads=[2, 2, 2, 2],
count_include_pad=1)
x = numpy.random.randn(1, 3, 28, 28).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = (3, 3)
strides = (1, 1)
pad_bottom = 2
pad_top = 2
pad_right = 2
pad_left = 2
pad_shape = [pad_top + pad_bottom, pad_left + pad_right]
out_shape = _get_output_shape(
'VALID', numpy.add(x_shape[2:], pad_shape), kernel_shape, strides)
padded = numpy.pad(
x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
mode='constant', constant_values=0)
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape,
pad_shape, 'AVG', count_include_pad=1)
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2, 2], auto_pad='SAME_LOWER')
x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = (2, 2)
strides = (1, 1)
out_shape = _get_output_shape(
'SAME_LOWER', x_shape[2:], kernel_shape, strides)
pad_shape = _get_pad_shape(
'SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)
pad_bottom = pad_shape[0] // 2
pad_top = pad_shape[0] - pad_bottom
pad_right = pad_shape[1] // 2
pad_left = pad_shape[1] - pad_right
padded = numpy.pad(
x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
mode='constant', constant_values=numpy.nan)
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[3, 3], pads=[2, 2, 2, 2])
x = numpy.random.randn(1, 3, 28, 28).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = (3, 3)
strides = (1, 1)
pad_bottom = 2
pad_top = 2
pad_right = 2
pad_left = 2
pad_shape = [pad_top + pad_bottom, pad_left + pad_right]
out_shape = _get_output_shape(
'VALID', numpy.add(x_shape[2:], pad_shape), kernel_shape, strides)
padded = numpy.pad(
x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)),
mode='constant', constant_values=numpy.nan)
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2])
x = numpy.random.randn(1, 3, 32).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = [2]
strides = [1]
out_shape = _get_output_shape(
'VALID', x_shape[2:], kernel_shape, strides)
padded = x
y = _pool(padded, x_shape, kernel_shape,
strides, out_shape, [0], 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2, 2])
x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = (2, 2)
strides = (1, 1)
out_shape = _get_output_shape(
'VALID', x_shape[2:], kernel_shape, strides)
padded = x
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[5, 5], strides=[3, 3])
x = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = (5, 5)
strides = (3, 3)
out_shape = _get_output_shape(
'VALID', x_shape[2:], kernel_shape, strides)
padded = x
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2, 2, 2])
x = numpy.random.randn(1, 3, 32, 32, 32).astype(numpy.float32)
x_shape = numpy.shape(x)
kernel_shape = [2, 2, 2]
strides = [1, 1, 1]
out_shape = _get_output_shape(
'VALID', x_shape[2:], kernel_shape, strides)
padded = x
y = _pool(
padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'AVG')
self._expect_average_pool(node, inputs=[x], outputs=[y])
python_tested.append(OnnxAveragePool)
@wraplog()
@unittest.skipIf(True, "not implemented yet")
def test_onnxt_runtime_average_pool_ceil(self):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[3, 3], strides=[2, 2], ceil_mode=True)
x = numpy.array([[[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]]]).astype(numpy.float32)
y = numpy.array([[[
[6, 7.5], [12, 13.5]]]]).astype(numpy.float32)
self._expect_average_pool(node, inputs=[x], outputs=[y])
@wraplog()
def test_onnxt_runtime_average_pool_big(self):
with self.subTest(name='test_averagepool_2d_precomputed_pads'):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[5, 5], pads=[2, 2, 2, 2])
x = numpy.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]]]).astype(numpy.float32)
y = numpy.array([[[[7, 7.5, 8, 8.5, 9],
[9.5, 10, 10.5, 11, 11.5],
[12, 12.5, 13, 13.5, 14],
[14.5, 15, 15.5, 16, 16.5],
[17, 17.5, 18, 18.5, 19]]]]).astype(numpy.float32)
self._expect_average_pool(node, inputs=[x], outputs=[y])
with self.subTest(name='test_averagepool_2d_precomputed_pads_count_include_pad'):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[5, 5], pads=[2, 2, 2, 2], count_include_pad=1)
x = numpy.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]]]).astype(numpy.float32)
y = numpy.array([[[[2.5200, 3.6000, 4.8000, 4.0800, 3.2400],
[4.5600, 6.4000, 8.4000, 7.0400, 5.5200],
[7.2000, 10.0000, 13.0000, 10.8000, 8.4000],
[6.9600, 9.6000, 12.4000, 10.2400, 7.9200],
[6.1200, 8.4000, 10.8000, 8.8800, 6.8400]]]]).astype(numpy.float32)
self._expect_average_pool(node, inputs=[x], outputs=[y])
with self.subTest(name='test_averagepool_2d_precomputed_same_upper'):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[3, 3], strides=[2, 2], auto_pad='SAME_UPPER')
x = numpy.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]]]).astype(numpy.float32)
y = numpy.array([[[[4, 5.5, 7],
[11.5, 13, 14.5],
[19, 20.5, 22]]]]).astype(numpy.float32)
self._expect_average_pool(node, inputs=[x], outputs=[y])
with self.subTest(name='test_averagepool_2d_precomputed_strides'):
node = onnx.helper.make_node(
'AveragePool', inputs=['x'], outputs=['y'],
kernel_shape=[2, 2], strides=[2, 2])
x = numpy.array([[[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]]]).astype(numpy.float32)
y = numpy.array([[[[4, 6],
[14, 16]]]]).astype(numpy.float32)
self._expect_average_pool(node, inputs=[x], outputs=[y])
@wraplog()
def test_onnxt_runtime_batch_normalization(self):
# input size: (1, 2, 1, 3)
x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
s = numpy.array([1.0, 1.5]).astype(numpy.float32)
bias = numpy.array([0, 1]).astype(numpy.float32)
mean = numpy.array([0, 3]).astype(numpy.float32)
var = numpy.array([1, 1.5]).astype(numpy.float32)
y = _batchnorm_test_mode(x, s, bias, mean, var).astype(numpy.float32)
onx = OnnxBatchNormalization(
'X', s, bias, mean, var, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x}, got, OnnxBatchNormalization, model_def)
# input size: (2, 3, 4, 5)
x = numpy.random.randn(2, 3, 4, 5).astype(numpy.float32)
s = numpy.random.randn(3).astype(numpy.float32)
bias = numpy.random.randn(3).astype(numpy.float32)
mean = numpy.random.randn(3).astype(numpy.float32)
var = numpy.random.rand(3).astype(numpy.float32)
epsilon = 1e-2
y = _batchnorm_test_mode(
x, s, bias, mean, var, epsilon).astype(numpy.float32)
onx = OnnxBatchNormalization(
'X', s, bias, mean, var,
output_names=['Y'], epsilon=epsilon,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])
python_tested.append(OnnxBatchNormalization)
@wraplog()
def test_onnxt_runtime_batch_normalization_training_fct(self):
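        """Checks the ``_batchnorm_training_mode`` reference helper directly,
        without building an ONNX graph."""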
x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
s = numpy.array([1.0, 1.5]).astype(numpy.float32)
bias = numpy.array([0, 1]).astype(numpy.float32)
mean = numpy.array([0, 3]).astype(numpy.float32)
var = numpy.array([1, 1.5]).astype(numpy.float32)
y, scale, bias, mean, var = (
_batchnorm_training_mode(x, s, bias, mean, var))
self.assertEqualArray(
numpy.array([[[[-1.2247356, 0., 1.2247356]],
[[-0.8371035, 1., 2.8371034]]]],
dtype=numpy.float32), y)
self.assertEqualArray(
numpy.array([0., 3.], dtype=numpy.float32), scale)
self.assertEqualArray(
numpy.array([0.6666667, 0.6666667], dtype=numpy.float32), bias)
self.assertEqualArray(
numpy.array([0., 2.9999998], dtype=numpy.float32), mean)
self.assertEqualArray(
numpy.array([0.96666664, 1.4166666], dtype=numpy.float32), var)
@wraplog()
@unittest.skipIf(OnnxBatchNormalization_14 is None,
reason="onnx too old")
def test_onnxt_runtime_batch_normalization_training(self):
# input size: (1, 2, 1, 3)
x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
s = numpy.array([1.0, 1.5]).astype(numpy.float32)
bias = numpy.array([0, 1]).astype(numpy.float32)
mean = numpy.array([0, 3]).astype(numpy.float32)
var = numpy.array([1, 1.5]).astype(numpy.float32)
y, scale, bias, mean, var = (
_batchnorm_training_mode(x, s, bias, mean, var))
onx = OnnxBatchNormalization_14(
'X', s, bias, mean, var,
output_names=['Y', 'scale', 'bias', 'mean', 'var'],
training_mode=1, op_version=14)
try:
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=14)
except RuntimeError as e:
if "Shape inference fails" in str(e):
warnings.warn(str(e))
return
raise e
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(
list(sorted(got)), ['Y', 'bias', 'mean', 'scale', 'var'])
self.assertEqualArray(scale, got['scale'])
self.assertEqualArray(bias, got['bias'])
self.assertEqualArray(mean, got['mean'])
        # var and Y may differ slightly between implementations, so they are
        # only checked for presence below.
        # self.assertEqualArray(var, got['var'])
        # self.assertEqualArray(y, got['Y'])
self.assertNotEmpty(y)
self.assertNotEmpty(var)
@wraplog()
def test_onnxt_runtime_cast_out(self):
x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
numpy.float32) # pylint: disable=E1101
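        # Each triplet maps an ONNX TensorProto element type to the matching
        # numpy dtype and skl2onnx tensor type.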
dest = [(TensorProto.FLOAT, numpy.float32, FloatTensorType), # pylint: disable=E1101
(TensorProto.DOUBLE, numpy.float64, # pylint: disable=E1101
DoubleTensorType), # pylint: disable=E1101
(TensorProto.INT32, numpy.int32, # pylint: disable=E1101
Int32TensorType), # pylint: disable=E1101
(TensorProto.INT64, numpy.int64, # pylint: disable=E1101
Int64TensorType), # pylint: disable=E1101
(TensorProto.INT8, numpy.int8, # pylint: disable=E1101
Int8TensorType), # pylint: disable=E1101
(TensorProto.INT16, numpy.int16, # pylint: disable=E1101
Int16TensorType), # pylint: disable=E1101
(TensorProto.UINT8, numpy.uint8, # pylint: disable=E1101
UInt8TensorType), # pylint: disable=E1101
(TensorProto.UINT32, numpy.uint32, # pylint: disable=E1101
UInt32TensorType), # pylint: disable=E1101
(TensorProto.UINT16, numpy.uint16, # pylint: disable=E1101
UInt16TensorType), # pylint: disable=E1101
(TensorProto.UINT64, numpy.uint64, # pylint: disable=E1101
UInt64TensorType), # pylint: disable=E1101
(TensorProto.FLOAT16, numpy.float16, # pylint: disable=E1101
Float16TensorType), # pylint: disable=E1101
(TensorProto.BOOL, numpy.bool_, # pylint: disable=E1101
BooleanTensorType), # pylint: disable=E1101
(TensorProto.STRING, numpy.str_, StringTensorType), ] # pylint: disable=E1101
for opset in range(9, get_opset_number_from_onnx() + 1):
for to, nptp, outp in dest:
if nptp == numpy.bool_:
self.assertIn(proto2dtype(to), (nptp, bool))
elif nptp == numpy.str_:
self.assertIn(proto2dtype(to), (nptp, str))
else:
self.assertEqual(proto2dtype(to), nptp)
self.assertEqual(to, guess_proto_dtype(nptp))
self.assertNotEmpty(_elem_type_as_str(to))
with self.subTest(opset=opset, to=to):
onx = OnnxCast('X', to=to, output_names=['Y'],
op_version=opset)
model_def = onx.to_onnx(
{'X': x}, outputs=[('Y', outp())],
target_opset=opset)
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
if nptp == numpy.str_:
self.assertEqual(
x.astype(nptp).tolist(), got['Y'].tolist())
else:
self.assertEqualArray(x.astype(nptp), got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x}, got, OnnxCast, model_def)
python_tested.append(OnnxCast)
@wraplog()
def test_onnxt_runtime_cast_in(self):
x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
numpy.float32) # pylint: disable=E1101
dest = [(TensorProto.FLOAT, numpy.float32, FloatTensorType), # pylint: disable=E1101
(TensorProto.DOUBLE, numpy.float64, # pylint: disable=E1101
DoubleTensorType), # pylint: disable=E1101
(TensorProto.INT32, numpy.int32, # pylint: disable=E1101
Int32TensorType), # pylint: disable=E1101
(TensorProto.INT64, numpy.int64, # pylint: disable=E1101
Int64TensorType), # pylint: disable=E1101
(TensorProto.INT8, numpy.int8, # pylint: disable=E1101
Int8TensorType), # pylint: disable=E1101
(TensorProto.INT16, numpy.int16, # pylint: disable=E1101
Int16TensorType), # pylint: disable=E1101
(TensorProto.UINT8, numpy.uint8, # pylint: disable=E1101
UInt8TensorType), # pylint: disable=E1101
(TensorProto.UINT32, numpy.uint32, # pylint: disable=E1101
UInt32TensorType), # pylint: disable=E1101
(TensorProto.UINT16, numpy.uint16, # pylint: disable=E1101
UInt16TensorType), # pylint: disable=E1101
(TensorProto.UINT64, numpy.uint64, # pylint: disable=E1101
UInt64TensorType), # pylint: disable=E1101
(TensorProto.FLOAT16, numpy.float16, # pylint: disable=E1101
Float16TensorType), # pylint: disable=E1101
(TensorProto.BOOL, numpy.bool_, # pylint: disable=E1101
BooleanTensorType), # pylint: disable=E1101
(TensorProto.STRING, numpy.str_, StringTensorType), ] # pylint: disable=E1101
for opset in range(9, get_opset_number_from_onnx() + 1):
for to, nptp, _ in dest:
if nptp == numpy.bool_:
self.assertIn(proto2dtype(to), (nptp, bool))
elif nptp == numpy.str_:
self.assertIn(proto2dtype(to), (nptp, str))
else:
self.assertEqual(proto2dtype(to), nptp)
self.assertEqual(to, guess_proto_dtype(nptp))
self.assertNotEmpty(_elem_type_as_str(to))
with self.subTest(opset=opset, to=to):
xi = x.astype(nptp)
onx = OnnxCast('X', to=TensorProto.STRING, # pylint: disable=E1101
output_names=['Y'],
op_version=opset)
model_def = onx.to_onnx(
{'X': xi}, outputs=[('Y', StringTensorType())],
target_opset=opset)
got = OnnxInference(model_def).run({'X': xi})
self.assertEqual(
xi.astype(str).tolist(), got['Y'].tolist())
python_tested.append(OnnxCast)
@wraplog()
def test_onnxt_runtime_ceil(self):
self.common_test_onnxt_runtime_unary(OnnxCeil, numpy.ceil)
@unittest.skipIf(OnnxCelu is None, reason="onnx too recent")
@wraplog()
def test_onnxt_runtime_celu1(self):
self.common_test_onnxt_runtime_unary(
OnnxCelu, _vcelu1, op_version=12,
outputs=[('Y', FloatTensorType([None, 2]))])
@unittest.skipIf(OnnxCelu is None, reason="onnx too recent")
@wraplog()
def test_onnxt_runtime_celu2(self):
_vcelu2 = numpy.vectorize(
            lambda x: pycelu(x, 1.), otypes=[numpy.float64])
self.common_test_onnxt_runtime_unary(
OnnxCelu, _vcelu2, op_version=12,
outputs=[('Y', FloatTensorType([None, 2]))])
@unittest.skipIf(onnx_opset_version() < 11,
reason="Explicitely tests Clip >= 11")
@wraplog()
def test_onnxt_runtime_clip(self):
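        """Clip >= 11 takes ``min`` and ``max`` as optional inputs rather
        than attributes, hence the constant tensors given to ``OnnxClip``."""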
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=None: OnnxClip(
x, numpy.array([0], dtype=numpy.float32),
output_names=output_names, op_version=op_version),
lambda x: numpy.clip(x, 0, 1e5))
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=None: OnnxClip(
x, numpy.array([-1000], dtype=numpy.float32),
numpy.array([0], dtype=numpy.float32),
op_version=op_version,
output_names=output_names),
lambda x: numpy.clip(x, -1e5, 0))
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=None: OnnxClip(
x,
numpy.array([0.1], dtype=numpy.float32),
numpy.array([2.1], dtype=numpy.float32),
output_names=output_names,
op_version=op_version),
lambda x: numpy.clip(x, 0.1, 2.1))
python_tested.append(OnnxClip)
@wraplog()
def test_onnxt_runtime_compress(self):
# axis is None
x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(numpy.float32)
x = x.reshape((-1, 2))
cond = numpy.array([False, True, False])
onx = OnnxCompress('X', 'cond', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'cond': cond},
outputs=[('Y', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
exp = numpy.compress(cond, x)
oinf = OnnxInference(model_def)
got = oinf.run({'X': x, 'cond': cond})
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x, 'cond': cond}, got, OnnxCompress, model_def)
python_tested.append(OnnxCompress)
@wraplog()
def test_onnxt_runtime_clip_10(self):
from skl2onnx.algebra.onnx_ops import OnnxClip_6 # pylint: disable=E0611
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=10: OnnxClip_6(
x, min=1e-5, max=1e5, output_names=output_names,
op_version=10),
lambda x: numpy.clip(x, 1e-5, 1e5),
op_version=10)
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=10: OnnxClip(
x, min=1e-5, max=1e5, output_names=output_names,
op_version=10),
lambda x: numpy.clip(x, 1e-5, 1e5),
op_version=10)
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=10: OnnxClip(
x, max=1e-5, output_names=output_names,
op_version=10),
lambda x: numpy.clip(x, -1e5, 1e-5),
op_version=10)
self.common_test_onnxt_runtime_unary(
lambda x, output_names=None, op_version=10: OnnxClip(
x, min=0.1, max=2.1,
output_names=output_names,
op_version=10),
lambda x: numpy.clip(x, 0.1, 2.1),
op_version=10)
@wraplog()
def test_onnxt_runtime_concat(self):
cst = numpy.array([[1, 2]], dtype=numpy.float32)
onx = OnnxConcat('X', 'Y', cst, output_names=['Z'],
op_version=get_opset_number_from_onnx())
X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float64)
Y = numpy.array([[8, 9], [10, 11], [12, 13]], dtype=numpy.float64)
model_def = onx.to_onnx({'X': X.astype(numpy.float32),
'Y': Y.astype(numpy.float32)},
outputs=[('Z', FloatTensorType([2]))],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X.astype(numpy.float32),
'Y': Y.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Z'])
self.assertEqual(got['Z'].shape, (6, 2))
exp = numpy.vstack([X, Y, cst])
self.assertEqualArray(exp, got['Z'])
self.common_expected_shapes_types(
oinf, {'X': X.astype(numpy.float32),
'Y': Y.astype(numpy.float32)},
got, OnnxConcat, model_def)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(
oinfpy, {'X': X.astype(numpy.float32),
'Y': Y.astype(numpy.float32)})
python_tested.append(OnnxConcat)
@wraplog()
def test_onnxt_runtime_constant_of_shape(self):
x = numpy.array([2, 2], dtype=numpy.int64)
y = numpy.zeros((2, 2), dtype=numpy.float32)
onx = OnnxConstantOfShape('X', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.int64)},
outputs=[('Y', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x.astype(numpy.int64)})
self.assertEqualArray(y, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x.astype(numpy.int64)}, got,
OnnxConstantOfShape, model_def)
python_tested.append(OnnxConstantOfShape)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': x})
@wraplog()
def test_onnxt_runtime_conv0(self):
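        """Convolution with a 3x3 kernel of ones: every output value is the
        plain sum of the inputs under the kernel window, which makes the
        expected tensors easy to check by hand."""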
x = numpy.array([[[[0., 1., 2., 3., 4.], # (1, 1, 5, 5) input tensor
[5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.]]]]).astype(numpy.float32)
W = numpy.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights
[1., 1., 1.],
[1., 1., 1.]]]]).astype(numpy.float32)
# test 1
y_with_padding = numpy.array([[[[12., 21., 27., 33., 24.], # (1, 1, 5, 5) output tensor
[33., 54., 63., 72., 51.],
[63., 99., 108., 117., 81.],
[93., 144., 153., 162., 111.],
[72., 111., 117., 123., 84.]]]]).astype(numpy.float32)
onx = OnnxConv(
'X', W, output_names=['Y'],
kernel_shape=[3, 3], pads=[1, 1, 1, 1],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
for rt in ['python', 'onnxruntime1']:
with self.subTest(runtime=rt):
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
# test 2
y_without_padding = numpy.array([[[[54., 63., 72.], # (1, 1, 3, 3) output tensor
[99., 108., 117.],
[144., 153., 162.]]]]).astype(numpy.float32)
onx = OnnxConv(
'X', W, output_names=['Y'],
kernel_shape=[3, 3], pads=[0, 0, 0, 0],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
for rt in ['python', 'onnxruntime1']:
with self.subTest(runtime=rt):
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_without_padding, got['Y'])
if rt == 'python':
self.common_expected_shapes_types(
oinf, {'X': x}, got, OnnxConv, model_def)
else:
self.assertRaise(
lambda: self.common_expected_shapes_types(
oinf, {'X': x}, got, OnnxConv, model_def),
RuntimeError)
# test 3
y = numpy.array([[[[12., 27., 24.],
[63., 108., 81.],
[72., 117., 84.]]]]).astype(numpy.float32)
onx = OnnxConv(
'X', W, output_names=['Y'],
kernel_shape=[3, 3],
auto_pad='SAME_LOWER', strides=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
for rt in ['python', 'onnxruntime1']:
with self.subTest(runtime=rt):
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])
python_tested.append(OnnxConv)
@wraplog()
def test_onnxt_runtime_conv1(self):
x = numpy.array([[[[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14.],
[15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24.],
[25., 26., 27., 28., 29.],
[30., 31., 32., 33., 34.]]]]).astype(numpy.float32)
W = numpy.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights
[1., 1., 1.],
[1., 1., 1.]]]]).astype(numpy.float32)
# test 1
y_with_padding = numpy.array([[[[12., 27., 24.], # (1, 1, 4, 3) output tensor
[63., 108., 81.],
[123., 198., 141.],
[112., 177., 124.]]]]).astype(numpy.float32)
onx = OnnxConv(
'X', W, output_names=['Y'],
kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
for rt in ['python', 'onnxruntime1']:
with self.subTest(runtime=rt):
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
# test 2
y_without_padding = numpy.array([[[[54., 72.], # (1, 1, 3, 2) output tensor
[144., 162.],
[234., 252.]]]]).astype(numpy.float32)
onx = OnnxConv(
'X', W, output_names=['Y'],
kernel_shape=[3, 3], pads=[0, 0, 0, 0], strides=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
for rt in ['python', 'onnxruntime1']:
with self.subTest(runtime=rt):
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_without_padding, got['Y'])
# test 3
y_with_asymmetric_padding = numpy.array([[[[21., 33.], # (1, 1, 4, 2) output tensor
[99., 117.],
[189., 207.],
[171., 183.]]]]).astype(numpy.float32)
onx = OnnxConv(
'X', W, output_names=['Y'],
kernel_shape=[3, 3], pads=[1, 0, 1, 0], strides=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
for rt in ['python', 'onnxruntime1']:
with self.subTest(runtime=rt):
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_asymmetric_padding, got['Y'])
@wraplog()
def test_onnxt_runtime_conv2_B(self):
x = numpy.random.rand(1, 3, 5, 4).astype(numpy.float32)
W = numpy.random.rand(4, 3, 3, 3).astype(numpy.float32)
B = numpy.array([100, 700, 1000, 7000], dtype=numpy.float32)
onx = OnnxConv(
'X', 'W', 'B', output_names=['Y'],
kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'W': W, 'B': B},
target_opset=get_opset_number_from_onnx())
ys = []
for rt in ['python', 'onnxruntime1']:
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x, 'W': W, 'B': B})
ys.append(got['Y'])
self.assertEqualArray(ys[0], ys[1], decimal=4)
@wraplog()
def test_onnxt_runtime_conv_transpose(self):
x = numpy.array([[[[0., 1., 2.], # (1, 1, 3, 3)
[3., 4., 5.],
[6., 7., 8.]]]]).astype(numpy.float32)
W = numpy.array([[[[1., 1., 1.], # (1, 2, 3, 3)
[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]]]]).astype(numpy.float32)
y_with_padding = numpy.array([[[[0., 1., 3., 3., 2.], # (1, 2, 5, 5)
[3., 8., 15., 12., 7.],
[9., 21., 36., 27., 15.],
[9., 20., 33., 24., 13.],
[6., 13., 21., 15., 8.]],
[[0., 1., 3., 3., 2.],
[3., 8., 15., 12., 7.],
[9., 21., 36., 27., 15.],
[9., 20., 33., 24., 13.],
[6., 13., 21., 15., 8.]]]]).astype(numpy.float32)
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
        python_tested.append(OnnxConvTranspose)
@wraplog()
def test_onnxt_runtime_conv_transpose_B(self):
x = numpy.random.rand(1, 3, 5, 4).astype(numpy.float32)
W = numpy.random.rand(3, 4, 3, 3).astype(numpy.float32)
B = numpy.array([100, 700, 1000, 7000], dtype=numpy.float32)
onx = OnnxConvTranspose(
'X', 'W', 'B', output_names=['Y'],
kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'W': W, 'B': B},
target_opset=get_opset_number_from_onnx())
ys = []
for rt in ['python', 'onnxruntime1']:
oinf = OnnxInference(model_def, runtime=rt)
got = oinf.run({'X': x, 'W': W, 'B': B})
ys.append(got['Y'])
self.assertEqual(len(ys), 2)
        # The two runtimes may not agree bitwise here, so only the presence
        # of both results is checked.
        # self.assertEqualArray(ys[0], ys[1])
@wraplog()
def test_onnxt_runtime_conv_transpose_1d(self):
x = numpy.array([[[0., 1., 2.]]]).astype(numpy.float32)
W = numpy.array([[[1., 1., 1.], # (1, 2, 3)
[1., 1., 1.]]]).astype(numpy.float32)
y_with_padding = numpy.array(
[[[0., 1., 3., 3., 2.], # (1, 2, 5)
[0., 1., 3., 3., 2.]]]).astype(numpy.float32)
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def, runtime="onnxruntime1")
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
python_tested.append(OnnxConvTranspose)
@wraplog()
def test_onnxt_runtime_conv_transpose_3d(self):
x = numpy.arange(60).reshape((1, 1, 3, 4, 5)).astype(numpy.float32)
W = numpy.ones((1, 2, 3, 3, 3)).astype(numpy.float32)
y_with_padding = numpy.array(
[[[[[0., 1., 3., 6., 9., 7., 4.], # (1, 2, 5, 6, 7)
[5., 12., 21., 27., 33., 24., 13.],
[15., 33., 54., 63., 72., 51., 27.],
[30., 63., 99., 108., 117., 81., 42.],
[25., 52., 81., 87., 93., 64., 33.],
[15., 31., 48., 51., 54., 37., 19.]],
[[20., 42., 66., 72., 78., 54., 28.],
[50., 104., 162., 174., 186., 128., 66.],
[90., 186., 288., 306., 324., 222., 114.],
[120., 246., 378., 396., 414., 282., 144.],
[90., 184., 282., 294., 306., 208., 106.],
[50., 102., 156., 162., 168., 114., 58.]],
[[60., 123., 189., 198., 207., 141., 72.],
[135., 276., 423., 441., 459., 312., 159.],
[225., 459., 702., 729., 756., 513., 261.],
[270., 549., 837., 864., 891., 603., 306.],
[195., 396., 603., 621., 639., 432., 219.],
[105., 213., 324., 333., 342., 231., 117.]],
[[60., 122., 186., 192., 198., 134., 68.],
[130., 264., 402., 414., 426., 288., 146.],
[210., 426., 648., 666., 684., 462., 234.],
[240., 486., 738., 756., 774., 522., 264.],
[170., 344., 522., 534., 546., 368., 186.],
[90., 182., 276., 282., 288., 194., 98.]],
[[40., 81., 123., 126., 129., 87., 44.],
[85., 172., 261., 267., 273., 184., 93.],
[135., 273., 414., 423., 432., 291., 147.],
[150., 303., 459., 468., 477., 321., 162.],
[105., 212., 321., 327., 333., 224., 113.],
[55., 111., 168., 171., 174., 117., 59.]]],
[[[0., 1., 3., 6., 9., 7., 4.],
[5., 12., 21., 27., 33., 24., 13.],
[15., 33., 54., 63., 72., 51., 27.],
[30., 63., 99., 108., 117., 81., 42.],
[25., 52., 81., 87., 93., 64., 33.],
[15., 31., 48., 51., 54., 37., 19.]],
[[20., 42., 66., 72., 78., 54., 28.],
[50., 104., 162., 174., 186., 128., 66.],
[90., 186., 288., 306., 324., 222., 114.],
[120., 246., 378., 396., 414., 282., 144.],
[90., 184., 282., 294., 306., 208., 106.],
[50., 102., 156., 162., 168., 114., 58.]],
[[60., 123., 189., 198., 207., 141., 72.],
[135., 276., 423., 441., 459., 312., 159.],
[225., 459., 702., 729., 756., 513., 261.],
[270., 549., 837., 864., 891., 603., 306.],
[195., 396., 603., 621., 639., 432., 219.],
[105., 213., 324., 333., 342., 231., 117.]],
[[60., 122., 186., 192., 198., 134., 68.],
[130., 264., 402., 414., 426., 288., 146.],
[210., 426., 648., 666., 684., 462., 234.],
[240., 486., 738., 756., 774., 522., 264.],
[170., 344., 522., 534., 546., 368., 186.],
[90., 182., 276., 282., 288., 194., 98.]],
[[40., 81., 123., 126., 129., 87., 44.],
[85., 172., 261., 267., 273., 184., 93.],
[135., 273., 414., 423., 432., 291., 147.],
[150., 303., 459., 468., 477., 321., 162.],
[105., 212., 321., 327., 333., 224., 113.],
[55., 111., 168., 171., 174., 117., 59.]]]]]).astype(numpy.float32)
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
@unittest.skipIf(True, reason="fails with output_shape")
@wraplog()
def test_onnxt_runtime_conv_transpose_output_shape(self):
x = numpy.arange(9).reshape((1, 1, 3, 3)).astype(numpy.float32)
W = numpy.ones((1, 2, 3, 3)).astype(numpy.float32)
y_with_padding = numpy.array(
[[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8)
[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]],
[[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(numpy.float32)
with self.subTest(part="output_shape"):
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
strides=[3, 2], output_shape=[10, 8],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def, runtime="onnxruntime1")
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
@wraplog()
def test_onnxt_runtime_conv_transpose_attributes(self):
x = numpy.arange(9).reshape((1, 1, 3, 3)).astype(numpy.float32)
W = numpy.ones((1, 2, 3, 3)).astype(numpy.float32)
y_with_padding = numpy.array(
[[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8)
[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]],
[[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[0., 0., 1., 1., 3., 2., 2., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[3., 3., 7., 4., 9., 5., 5., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[6., 6., 13., 7., 15., 8., 8., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(numpy.float32)
with self.subTest(part="output_padding"):
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
strides=[3, 2], output_padding=[1, 1],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
with self.subTest(part="kernel_shape"):
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
strides=[3, 2], output_shape=[10, 8],
kernel_shape=[3, 3], output_padding=[1, 1],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
@wraplog()
def test_onnxt_runtime_conv_transpose_dilation(self):
x = numpy.array([[[[3., 8., 1.], # (1, 1, 3, 3)
[9., 5., 7.],
[3., 2., 6.]]]]).astype(numpy.float32)
W = numpy.array([[[[7., 2.], # (1, 1, 2, 2)
[1., 9.]]]]).astype(numpy.float32)
y_with_padding = numpy.array(
[[[[21., 56., 13., 16., 2.], # [1, 1, 5, 5]
[63., 35., 67., 10., 14.],
[24., 22., 76., 76., 21.],
[9., 5., 88., 45., 63.],
[3., 2., 33., 18., 54.]]]]).astype(numpy.float32)
onx = OnnxConvTranspose(
'X', W, output_names=['Y'], dilations=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
@wraplog()
def test_onnxt_runtime_conv_transpose_pads(self):
x = numpy.arange(9).reshape((1, 1, 3, 3)).astype(numpy.float32)
W = numpy.ones((1, 2, 3, 3)).astype(numpy.float32)
y_with_padding = numpy.array(
[[[[1., 1., 3.], # (1, 2, 7, 3)
[1., 1., 3.],
[7., 4., 9.],
[7., 4., 9.],
[7., 4., 9.],
[13., 7., 15.],
[13., 7., 15.]],
[[1., 1., 3.],
[1., 1., 3.],
[7., 4., 9.],
[7., 4., 9.],
[7., 4., 9.],
[13., 7., 15.],
[13., 7., 15.]]]]).astype(numpy.float32)
onx = OnnxConvTranspose(
'X', W, output_names=['Y'],
strides=[3, 2], pads=[1, 2, 1, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y_with_padding, got['Y'])
@wraplog()
def test_onnxt_runtime_cos(self):
self.common_test_onnxt_runtime_unary(OnnxCos, numpy.cos)
@wraplog()
def test_onnxt_runtime_cosh(self):
self.common_test_onnxt_runtime_unary(OnnxCosh, numpy.cosh)
@wraplog()
def test_onnxt_runtime_cum_sum(self):
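        """Runs CumSum with default attributes, then with ``reverse=1`` and
        ``exclusive=1``; NotImplementedError and RuntimeError are tolerated
        for the variants the runtime does not support yet."""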
x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64)
axis = numpy.array([0]).astype(numpy.int32)
exp = numpy.array([1., 3., 6., 10., 15.]).astype(numpy.float64)
onx = OnnxCumSum('X', 'axis', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'axis': axis},
outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x.astype(numpy.float64),
'axis': axis})
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x.astype(numpy.float64),
'axis': axis},
got, OnnxCumSum, model_def)
python_tested.append(OnnxCumSum)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': x, 'axis': axis})
# reverse = 1
x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64)
axis = numpy.array([0]).astype(numpy.int32)
exp = numpy.array([15., 14., 12., 9., 5.]).astype(numpy.float64)
onx = OnnxCumSum('X', 'axis', output_names=['Y'], reverse=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'axis': axis},
outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
try:
got = OnnxInference(model_def).run({'X': x, 'axis': axis})
self.assertEqualArray(exp, got['Y'])
except NotImplementedError:
pass
# exclusive = 1
x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64)
axis = numpy.array([0]).astype(numpy.int32)
exp = numpy.array([0., 1., 3., 6., 10.]).astype(numpy.float64)
onx = OnnxCumSum('X', 'axis', output_names=['Y'], exclusive=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'axis': axis},
outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
try:
got = OnnxInference(model_def).run({'X': x, 'axis': axis})
self.assertEqualArray(exp, got['Y'])
except NotImplementedError:
pass
# 2d axis = 0
x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
numpy.float64).reshape((2, 3))
axis = numpy.array([0]).astype(numpy.int32)
exp = numpy.array([1., 2., 3., 5., 7., 9.]).astype(
numpy.float64).reshape((2, 3))
onx = OnnxCumSum('X', 'axis', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'axis': axis},
outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
got = OnnxInference(model_def).run({'X': x, 'axis': axis})
self.assertEqualArray(exp, got['Y'])
# 2d axis = 1
x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
numpy.float64).reshape((2, 3))
axis = numpy.array([-1]).astype(numpy.int32)
exp = numpy.array([1., 3., 6., 4., 9., 15.]).astype(
numpy.float64).reshape((2, 3))
onx = OnnxCumSum('X', 'axis', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'axis': axis},
outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
got = OnnxInference(model_def).run({'X': x, 'axis': axis})
self.assertEqualArray(exp, got['Y'])
# 2d axis = 1, reverse
x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(
numpy.float64).reshape((2, 3))
axis = numpy.array([-1]).astype(numpy.int32)
exp = numpy.array([1., 3., 6., 4., 9., 15.]).astype(
numpy.float64).reshape((2, 3))
onx = OnnxCumSum('X', 'axis', output_names=['Y'], reverse=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x, 'axis': axis},
outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
try:
got = OnnxInference(model_def).run({'X': x, 'axis': axis})
self.assertEqualArray(exp, got['Y'])
except NotImplementedError:
pass
# no axis
x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64)
axis = numpy.array([0]).astype(numpy.int32)
exp = numpy.array([1., 3., 6., 10., 15.]).astype(numpy.float64)
try:
onx = OnnxCumSum('X', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': x}, outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
got = OnnxInference(model_def).run({'X': x})
self.assertEqualArray(exp, got['Y'])
except RuntimeError:
pass
# reverse = 1
x = numpy.array([1., 2., 3., 4., 5.]).astype(numpy.float64)
axis = numpy.array([0]).astype(numpy.int32)
exp = numpy.array([15., 14., 12., 9., 5.]).astype(numpy.float64)
try:
onx = OnnxCumSum('X', output_names=['Y'], reverse=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': x}, outputs=[('Y', DoubleTensorType())],
target_opset=get_opset_number_from_onnx())
got = OnnxInference(model_def).run({'X': x})
self.assertEqualArray(exp, got['Y'])
except RuntimeError:
pass
@wraplog()
def test_onnxt_runtime_det(self):
self.common_test_onnxt_runtime_unary(
OnnxDet, lambda x: numpy.array([numpy.linalg.det(x)]),
do_sparse=False)
@wraplog()
def test_onnxt_runtime_dequantize_linear(self):
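        """DequantizeLinear computes ``(x - x_zero_point) * x_scale``: first
        per channel (scale and zero point broadcast along axis 1), then per
        tensor."""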
X = numpy.array([[[[3, 89], [34, 200], [74, 59]],
[[5, 24], [24, 87], [32, 13]],
[[245, 99], [4, 142], [121, 102]], ], ],
dtype=numpy.uint8)
x_scale = numpy.array([2, 4, 5], dtype=numpy.float32)
x_zero_point = numpy.array([84, 24, 196], dtype=numpy.uint8)
exp = ((X.astype(numpy.float32) - x_zero_point.reshape(
(1, 3, 1, 1)).astype(numpy.float32)) *
x_scale.reshape((1, 3, 1, 1)))
onx = OnnxDequantizeLinear(
'X', x_scale, x_zero_point, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxDequantizeLinear, model_def)
X = numpy.array([0, 3, 128, 255]).astype(numpy.uint8)
x_scale = numpy.array([2], dtype=numpy.float32)
x_zero_point = numpy.array([128], dtype=numpy.uint8)
exp = numpy.array([-256, -250, 0, 254], dtype=numpy.float32)
onx = OnnxDequantizeLinear(
'X', x_scale, x_zero_point, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
python_tested.append(OnnxDequantizeLinear)
@wraplog()
def test_onnxt_runtime_div(self):
self.common_test_onnxt_runtime_binary(OnnxDiv, lambda x, y: x / y)
@wraplog()
def test_onnxt_runtime_dropout_10(self):
seed = numpy.int64(0)
X = numpy.random.randn(3, 4, 5).astype(numpy.float32)
onx = OnnxDropout_7('X', output_names=['Y'], op_version=10)
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
outputs=[('Y', FloatTensorType())],
target_opset=10)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqual(got['Y'].shape, X.shape)
self.assertEqualArray(got['Y'], _dropout(X, seed=seed)[0])
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxDropout_7, model_def)
python_tested.append(OnnxDropout)
@wraplog()
def test_onnxt_runtime_dropout(self):
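        """Dropout with a fixed ``seed``: output only, output plus mask,
        ratio passed through input ``R``, and training mode enabled through
        the boolean input ``B``."""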
seed = numpy.int64(0)
X = numpy.random.randn(3, 4, 5).astype(numpy.float32)
onx = OnnxDropout('X', output_names=['Y'], seed=seed,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
outputs=[('Y', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqual(got['Y'].shape, X.shape)
self.assertEqualArray(got['Y'], _dropout(X, seed=seed)[0])
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxDropout, model_def)
onx = OnnxDropout('X', output_names=['Y', 'Z'], seed=seed,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
outputs=[('Y', FloatTensorType()),
('Z', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y', 'Z'])
self.assertEqual(got['Y'].shape, X.shape)
res = _dropout(X, seed=seed, return_mask=True)
self.assertEqualArray(got['Y'], res[0])
self.assertEqualArray(got['Z'], res[1])
R = numpy.array([0.1], dtype=numpy.float32)
onx = OnnxDropout('X', 'R', output_names=['Y'], seed=seed,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32),
'R': R.astype(numpy.float32)},
outputs=[('Y', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X, 'R': R})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqual(got['Y'].shape, X.shape)
self.assertEqualArray(
got['Y'], _dropout(X, seed=seed, drop_probability=0.1)[0])
R = numpy.array([0.75], dtype=numpy.float32)
B = numpy.array([True])
onx = OnnxDropout('X', 'R', 'B', output_names=['Y'], seed=seed,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32),
'R': R, 'B': B},
outputs=[('Y', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X, 'R': R, 'B': B})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqual(got['Y'].shape, X.shape)
self.assertEqualArray(
got['Y'], _dropout(X, seed=seed, drop_probability=0.75,
training_mode=True)[0])
python_tested.append(OnnxDropout)
@wraplog()
def test_onnxt_runtime_einsum(self):
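        """``bij,bjk->bik`` is a batched matrix multiplication; the runtime
        is compared against ``numpy.einsum``."""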
X = numpy.random.randn(5, 2, 3).astype(numpy.float32)
Y = numpy.random.randn(5, 3, 4).astype(numpy.float32)
equation = 'bij,bjk->bik'
onx = OnnxEinsum(
'X', 'Y', equation=equation, output_names=['Z'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32),
'Y': Y.astype(numpy.float32)},
outputs=[('Z', FloatTensorType([2]))],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X, 'Y': Y})
exp = numpy.einsum(equation, X, Y)
self.assertEqualArray(exp, got['Z'])
self.common_expected_shapes_types(
oinf, {'X': X, 'Y': Y}, got, OnnxEinsum, model_def)
python_tested.append(OnnxEinsum)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': X.astype(numpy.float32),
'Y': Y.astype(numpy.float32)})
@wraplog()
def test_onnxt_runtime_eyelike(self):
onx = OnnxEyeLike('X', k=0, output_names=['Y'])
X = numpy.array([2, 2], dtype=numpy.int64)
model_def = onx.to_onnx({'X': X.astype(numpy.int64)},
target_opset=get_opset_number_from_onnx(),
outputs=[('Y', FloatTensorType())])
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual(list(sorted(got)), ['Y'])
exp = numpy.eye(*X, k=0)
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxEyeLike, model_def)
oinfpy = OnnxInference(model_def, runtime="python")
validate_python_inference(oinfpy, {'X': X.astype(numpy.int64)})
python_tested.append(OnnxEyeLike)
@wraplog()
def test_onnxt_runtime_equal(self):
self.common_test_onnxt_runtime_binary(OnnxEqual, numpy.equal)
@wraplog()
def test_onnxt_runtime_erf(self):
self.common_test_onnxt_runtime_unary(OnnxErf, erf)
@wraplog()
def test_onnxt_runtime_exp(self):
self.common_test_onnxt_runtime_unary(OnnxExp, numpy.exp)
@wraplog()
def test_onnxt_runtime_flatten(self):
shape = (2, 3, 4, 5)
x = numpy.random.random_sample(shape).astype( # pylint: disable=E1101
numpy.float32) # pylint: disable=E1101
for i in range(len(shape)):
node = OnnxFlatten('X', axis=i, output_names='Y',
op_version=get_opset_number_from_onnx())
model_def = node.to_onnx(
{'X': x}, outputs=[('Y', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
new_shape = ((1, -1) if i == 0
else (numpy.prod(shape[0:i]).astype(int), -1))
exp = numpy.reshape(x, new_shape)
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x}, got, OnnxFlatten, model_def)
python_tested.append(OnnxFlatten)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': x})
@wraplog()
def test_onnxt_runtime_floor(self):
self.common_test_onnxt_runtime_unary(OnnxFloor, numpy.floor)
@wraplog()
def test_onnxt_runtime_gather_elements0(self):
from skl2onnx.algebra.onnx_ops import OnnxGatherElements # pylint: disable=E0611
# ex 1
data = numpy.array([[1, 2],
[3, 4]], dtype=numpy.float32)
indices = numpy.array([], dtype=numpy.int64)
onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': data, 'Y': indices},
outputs=[('Z', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': data, 'Y': indices})
self.assertEqual(got['Z'].size, 0)
self.common_expected_shapes_types(
oinf, {'X': data, 'Y': indices}, got,
OnnxGatherElements, model_def)
@wraplog()
def test_onnxt_runtime_gather_elements0_fortran(self):
from skl2onnx.algebra.onnx_ops import OnnxGatherElements # pylint: disable=E0611
# ex 1
data = numpy.array([[1, 2],
[3, 4]], dtype=numpy.float32, order='F')
indices = numpy.array([], dtype=numpy.int64, order='F')
onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': data, 'Y': indices},
outputs=[('Z', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': data, 'Y': indices})
self.assertEqual(got['Z'].size, 0)
@wraplog()
def test_onnxt_runtime_gather_elements(self):
from skl2onnx.algebra.onnx_ops import OnnxGatherElements # pylint: disable=E0611
# ex 1
data = numpy.array([[1, 2],
[3, 4]], dtype=numpy.float32)
indices = numpy.array([[0, 0],
[1, 0]], dtype=numpy.int64)
onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=1,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': data, 'Y': indices},
outputs=[('Z', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': data, 'Y': indices})
exp = numpy.array([[1, 1],
[4, 3]], dtype=numpy.float32)
        self.assertEqualArray(exp, got['Z'])
python_tested.append(OnnxGatherElements)
oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
validate_python_inference(oinfpy, {'X': data, 'Y': indices})
# ex 2
data = numpy.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=numpy.float32)
indices = numpy.array([[1, 2, 0],
[2, 0, 0]], dtype=numpy.int32)
onx = OnnxGatherElements('X', 'Y', output_names=['Z'], axis=0,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': data, 'Y': indices},
outputs=[('Z', FloatTensorType())],
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': data, 'Y': indices})
exp = numpy.array([[4, 8, 3],
[7, 2, 3]], dtype=numpy.float32)
        self.assertEqualArray(exp, got['Z'])
@wraplog()
def test_onnxt_runtime_gemm_python(self):
self.do_test_onnxt_runtime_gemm("python")
python_tested.append(OnnxGemm)
@wraplog()
def test_onnxt_runtime_gemm_onnxruntime(self):
self.do_test_onnxt_runtime_gemm("onnxruntime1")
def do_test_onnxt_runtime_gemm(self, runtime):
idi = numpy.array([[1, 0], [1, 1]], dtype=numpy.float32)
cst = numpy.array([4, 5], dtype=numpy.float32)
X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float32)
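# Per the ONNX Gemm definition, Y = alpha * A' @ B' + beta * C, where A' and
# B' are optionally transposed through transA/transB (alpha=beta=1 here);
# the variants below check every transpose combination against numpy.dot.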
onx = OnnxGemm('X', idi, cst, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
try:
oinf = OnnxInference(model_def, runtime=runtime)
except RuntimeError as e:
raise RuntimeError(
"Unable to instantiate (runtime='{}')\n{}".format(
runtime, model_def)) from e
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X, idi) + cst, got['Y'], decimal=5)
onx = OnnxGemm('X', idi, cst, transA=1, transB=1, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
try:
oinf = OnnxInference(model_def, runtime=runtime)
except RuntimeError as e:
raise RuntimeError(
"Unable to instantiate (runtime='{}')\n{}".format(
runtime, model_def)) from e
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X.T, idi.T) + cst, got['Y'], decimal=5)
onx = OnnxGemm('X', idi, cst, transA=1, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
model_def.ir_version = get_ir_version_from_onnx()
oinf = OnnxInference(model_def, runtime=runtime)
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X.T, idi) + cst, got['Y'], decimal=5)
onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
oinf = OnnxInference(model_def, runtime=runtime)
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X, idi.T) + cst, got['Y'], decimal=5)
onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'],
alpha=numpy.float32(1.),
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
oinf = OnnxInference(model_def, runtime=runtime)
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X, idi.T) + cst, got['Y'], decimal=5)
if runtime != 'onnxruntime1':
onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'],
alpha=numpy.float32(1.),
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float64)},
target_opset=get_opset_number_from_onnx())
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
oinf = OnnxInference(model_def, runtime=runtime)
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X, idi.T) +
cst, got['Y'], decimal=5)
@wraplog()
def test_onnxt_runtime_global_average_pool(self):
x = numpy.random.randn(1, 3, 5, 5).astype(numpy.float32)
y = _global_average_pool(x).astype(numpy.float32)
onx = OnnxGlobalAveragePool(
'X', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': x}, got, OnnxGlobalAveragePool, model_def)
x = numpy.array([[[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]]]).astype(numpy.float32)
y = numpy.array([[[[5]]]]).astype(numpy.float32)
onx = OnnxGlobalAveragePool(
'X', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])
python_tested.append(OnnxGlobalAveragePool)
@wraplog()
def test_onnxt_runtime_greater(self):
self.common_test_onnxt_runtime_binary(OnnxGreater, numpy.greater)
@wraplog()
def test_onnxt_runtime_greater_or_equal(self):
self.common_test_onnxt_runtime_binary(
OnnxGreaterOrEqual, numpy.greater_equal)
@wraplog()
def test_onnxt_runtime_identity(self):
self.common_test_onnxt_runtime_unary(OnnxIdentity, lambda x: x)
@wraplog()
def test_onnxt_runtime_isnan(self):
self.common_test_onnxt_runtime_unary(OnnxIsNaN, numpy.isnan)
@wraplog()
def test_onnxt_runtime_less(self):
self.common_test_onnxt_runtime_binary(OnnxLess, numpy.less)
@wraplog()
def test_onnxt_runtime_less_or_equal(self):
self.common_test_onnxt_runtime_binary(
OnnxLessOrEqual, numpy.less_equal)
@wraplog()
def test_onnxt_runtime_log(self):
self.common_test_onnxt_runtime_unary(OnnxLog, numpy.log)
@wraplog()
def test_onnxt_runtime_lp_normalization(self):
onx = OnnxLpNormalization('X', output_names=['Y'], p=2, axis=1,
op_version=get_opset_number_from_onnx())
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float32)
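# LpNormalization with p=2, axis=1 divides each row by its L2 norm,
# e.g. [1, 2] / sqrt(5) -> [0.4472, 0.8944].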
model_def = onx.to_onnx({'X': X},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
exp = numpy.array([[0.4472136, 0.8944272],
[0.6, -0.8]], dtype=numpy.float32)
self.assertEqualArray(got['Y'], exp)
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxLpNormalization, model_def)
onx = OnnxLpNormalization('X', output_names=['Y'], p=2, axis=0,
op_version=get_opset_number_from_onnx())
X = numpy.array([[1, 2], [3, -4]], dtype=numpy.float32)
model_def = onx.to_onnx({'X': X},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
exp = numpy.array([[0.3162278, 0.4472136],
[0.9486833, -0.8944272]], dtype=numpy.float32)
self.assertEqualArray(got['Y'], exp)
python_tested.append(OnnxLpNormalization)
@wraplog()
def test_onnxt_runtime_matmul(self):
self.common_test_onnxt_runtime_binary(OnnxMatMul, lambda x, y: x @ y)
@wraplog()
def test_onnxt_runtime_max(self):
self.common_test_onnxt_runtime_binary(
OnnxMax, lambda x, y: numpy.maximum(x, y))
@wraplog()
def test_onnxt_runtime_max_pool_1d_default(self):
X = numpy.random.randn(1, 3, 32).astype(numpy.float32)
kernel_shape = [2]
strides = [1]
out_shape = _pool_get_output_shape(
b'VALID', X.shape[2:], kernel_shape, strides)
exp = _pool_impl(
X, X.shape, kernel_shape, strides, out_shape, [0], b'MAX')
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=kernel_shape,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
self.assertEqual(got['Y'].dtype, X.dtype)
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxMaxPool, model_def)
@wraplog()
def test_onnxt_runtime_max_pool_1d_default_64(self):
X = numpy.random.randn(1, 3, 32).astype(numpy.float64)
kernel_shape = [2]
strides = [1]
out_shape = _pool_get_output_shape(
b'VALID', X.shape[2:], kernel_shape, strides)
exp = _pool_impl(
X, X.shape, kernel_shape, strides, out_shape, [0], b'MAX')
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=kernel_shape,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'], decimal=5)
self.assertEqual(got['Y'].dtype, X.dtype)
self.assertEqual(got['Y'].dtype, numpy.float64)
@wraplog()
def test_onnxt_runtime_max_pool_2d(self):
# ceil
X = numpy.array([[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]]]).astype(numpy.float32)
exp = numpy.array([[[[11, 12], [15, 16]]]]).astype(numpy.float32)
kernel_shape = [3, 3]
strides = [2, 2]
ceil_mode = True
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=kernel_shape,
strides=strides, ceil_mode=ceil_mode,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
# default
X = numpy.random.randn(1, 3, 32, 32).astype(numpy.float32)
kernel_shape = [2, 2]
strides = [1, 1]
out_shape = _pool_get_output_shape(
b'VALID', X.shape[2:], kernel_shape, strides)
exp = _pool_impl(X, X.shape, kernel_shape, strides,
out_shape, (0, 0), b'MAX')
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=kernel_shape,
strides=strides,
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
# dilations
X = numpy.array([[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]]]).astype(numpy.float32)
exp = numpy.array([[[[11, 12], [15, 16]]]]).astype(numpy.float32)
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=[2, 2],
strides=[1, 1], dilations=[2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
# pads
X = numpy.array([[[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]]]).astype(numpy.float32)
exp = numpy.array([[[[13, 14, 15, 15, 15],
[18, 19, 20, 20, 20],
[23, 24, 25, 25, 25],
[23, 24, 25, 25, 25],
[23, 24, 25, 25, 25]]]]).astype(numpy.float32)
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=[5, 5],
pads=[2, 2, 2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
# precomputed values for auto_pad=SAME_UPPER
X = numpy.array([[[[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]]]]).astype(numpy.float32)
exp = numpy.array([[[[7, 9, 10],
[17, 19, 20],
[22, 24, 25]]]]).astype(numpy.float32)
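# auto_pad=SAME_UPPER pads so the output spatial size is ceil(input/stride)
# (here ceil(5/2) = 3), with any odd padding added at the end of each axis.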
onx = OnnxMaxPool('X', output_names=['Y'],
kernel_shape=[3, 3],
strides=[2, 2], auto_pad=b'SAME_UPPER',
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
python_tested.append(OnnxMaxPool)
@wraplog()
def test_onnxt_runtime_max_pool_3d_default(self):
X = numpy.random.randn(1, 3, 32, 32, 32).astype(numpy.float32)
out_shape = _pool_get_output_shape(
b'VALID', X.shape[2:], [2, 2, 2], [1, 1, 1])
onx = OnnxMaxPool(
'X', output_names=['Y'], kernel_shape=[2, 2, 2],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx(
{'X': X}, target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqual([1, 3, 31, 31, 31], list(got['Y'].shape))
try:
exp = _pool_impl(X, X.shape, [2, 2, 2], [
1, 1, 1], out_shape, (0, 0), b'MAX')
except IndexError:
# remaining bug
return
self.assertEqualArray(exp, got['Y'])
@wraplog()
def test_onnxt_runtime_mean(self):
idi = numpy.identity(2, dtype=numpy.float64)
onx = OnnxMean('X', idi, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
X = numpy.array([[1, 2], [3, 4]], dtype=numpy.float64)
oinf = OnnxInference(model_def)
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray((idi + X) / 2, got['Y'], decimal=5)
self.common_expected_shapes_types(
oinf, {'X': X.astype(numpy.float32)}, got,
OnnxMean, model_def)
python_tested.append(OnnxMean)
@wraplog()
def test_onnxt_runtime_min(self):
self.common_test_onnxt_runtime_binary(
OnnxMin, lambda x, y: numpy.minimum(x, y))
@wraplog()
def test_onnxt_runtime_mod(self):
self.common_test_onnxt_runtime_binary(
OnnxMod, lambda x, y: numpy.nan_to_num(numpy.mod(x, y)),
dtype=numpy.int64)
@wraplog()
def test_onnxt_runtime_mul(self):
self.common_test_onnxt_runtime_binary(OnnxMul, lambda x, y: x * y)
@wraplog()
def test_onnxt_runtime_neg(self):
self.common_test_onnxt_runtime_unary(OnnxNeg, numpy.negative)
@wraplog()
def test_onnxt_runtime_not(self):
self.common_test_onnxt_runtime_unary(OnnxNot, numpy.logical_not)
@wraplog()
def test_onnxt_runtime_or(self):
self.common_test_onnxt_runtime_binary(
OnnxOr, numpy.logical_or, dtype=numpy.bool_)
@wraplog()
def test_onnxt_runtime_pad(self):
data = numpy.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]],
dtype=numpy.float32)
pads = numpy.array([0, 2, 0, 0], dtype=numpy.int64)
constant_value = numpy.array([0.0], dtype=numpy.float32)
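# The 'pads' layout is [dim0_begin, dim1_begin, dim0_end, dim1_end] for a
# rank-2 input, so [0, 2, 0, 0] prepends two columns along axis 1.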
exp = numpy.array([[0.0, 0.0, 1.0, 1.2],
[0.0, 0.0, 2.3, 3.4],
[0.0, 0.0, 4.5, 5.7]], dtype=numpy.float32)
onx = OnnxPad(
'data', 'pads', constant_value, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'data': data, 'pads': pads},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'data': data, 'pads': pads})
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'data': data, 'pads': pads}, got,
OnnxPad, model_def)
data = numpy.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]],
dtype=numpy.float32)
pads = numpy.array([0, 2, 0, 0], dtype=numpy.int64)
constant_value = numpy.array([0.0], dtype=numpy.float32)
exp = numpy.array([[1.0, 1.2, 1.0, 1.2],
[2.3, 3.4, 2.3, 3.4],
[4.5, 5.7, 4.5, 5.7]], dtype=numpy.float32)
onx = OnnxPad(
'data', 'pads', output_names=['Y'],
mode='reflect', op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'data': data, 'pads': pads},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'data': data, 'pads': pads})
self.assertEqualArray(exp, got['Y'])
data = numpy.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]],
dtype=numpy.float32)
pads = numpy.array([0, 2, 0, 0], dtype=numpy.int64)
constant_value = numpy.array([0.0], dtype=numpy.float32)
exp = numpy.array([[1.0, 1.0, 1.0, 1.2],
[2.3, 2.3, 2.3, 3.4],
[4.5, 4.5, 4.5, 5.7]], dtype=numpy.float32)
onx = OnnxPad(
'data', 'pads', output_names=['Y'],
mode='edge', op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'data': data, 'pads': pads},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'data': data, 'pads': pads})
self.assertEqualArray(exp, got['Y'])
python_tested.append(OnnxPad)
@wraplog()
def test_onnxt_runtime_pad2(self):
data = numpy.random.randn(1, 3, 4, 5).astype(numpy.float32)
pads = numpy.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(numpy.int64)
constant_value = numpy.array([1.2], dtype=numpy.float32)
exp = _pad_impl(data, pads, 'constant', 1.2)
onx = OnnxPad(
'data', 'pads', constant_value, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'data': data, 'pads': pads},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'data': data, 'pads': pads})
self.assertEqualArray(exp, got['Y'])
for mode in ('edge', 'reflect'):
onx = OnnxPad(
'data', 'pads', output_names=['Y'],
mode=mode, op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'data': data, 'pads': pads},
target_opset=get_opset_number_from_onnx())
data = numpy.random.randn(1, 3, 4, 5).astype(numpy.int32)
pads = numpy.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(numpy.int64)
exp = _pad_impl(data, pads, mode)
oinf = OnnxInference(model_def)
got = oinf.run({'data': data, 'pads': pads})
self.assertEqualArray(exp, got['Y'])
@wraplog()
def test_onnxt_runtime_pow(self):
self.common_test_onnxt_runtime_binary(OnnxPow, numpy.power)
@wraplog()
def test_onnxt_runtime_qlinear_conv(self):
x = numpy.array(
[[255, 174, 162, 25, 203, 168, 58],
[15, 59, 237, 95, 129, 0, 64],
[56, 242, 153, 221, 168, 12, 166],
[232, 178, 186, 195, 237, 162, 237],
[188, 39, 124, 77, 80, 102, 43],
[127, 230, 21, 83, 41, 40, 134],
[255, 154, 92, 141, 42, 148, 247], ],
dtype=numpy.uint8).reshape((1, 1, 7, 7))
x_scale = numpy.float32(0.00369204697)
x_zero_point = numpy.uint8(132)
w = numpy.array([0], dtype=numpy.uint8).reshape((1, 1, 1, 1))
w_scale = numpy.array([0.00172794575], dtype=numpy.float32)
w_zero_point = numpy.array([255], dtype=numpy.uint8)
y_scale = numpy.float32(0.00162681262)
y_zero_point = numpy.uint8(123)
output = numpy.array(
[[0, 81, 93, 230, 52, 87, 197],
[240, 196, 18, 160, 126, 255, 191],
[199, 13, 102, 34, 87, 243, 89],
[23, 77, 69, 60, 18, 93, 18],
[67, 216, 131, 178, 175, 153, 212],
[128, 25, 234, 172, 214, 215, 121],
[0, 101, 163, 114, 213, 107, 8], ],
dtype=numpy.uint8).reshape((1, 1, 7, 7))
node = OnnxQLinearConv('x', 'x_scale', 'x_zero_point', 'w',
'w_scale', 'w_zero_point', 'y_scale',
'y_zero_point', output_names=['y'],
op_version=get_opset_number_from_onnx())
inputs = {'x': x, 'x_scale': x_scale, 'x_zero_point': x_zero_point,
'w': w, 'w_scale': w_scale, 'w_zero_point': w_zero_point,
'y_scale': y_scale, 'y_zero_point': y_zero_point}
model_def = node.to_onnx(inputs,
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run(inputs)
self.assertEqualArray(output, got['y'])
self.common_expected_shapes_types(
oinf, inputs, got, OnnxQLinearConv, model_def)
python_tested.append(OnnxQLinearConv)
@wraplog()
def test_onnxt_runtime_qlinear_conv_test0(self):
x_scale = numpy.float32(0.00369204697)
x_zero_point = numpy.uint8(132)
x = numpy.array(
[[255, 174, 162, 25, 203, 168, 58],
[15, 59, 237, 95, 129, 0, 64],
[56, 242, 153, 221, 168, 12, 166],
[232, 178, 186, 195, 237, 162, 237],
[188, 39, 124, 77, 80, 102, 43],
[127, 230, 21, 83, 41, 40, 134],
[255, 154, 92, 141, 42, 148, 247], ],
dtype=numpy.uint8).reshape((1, 1, 7, 7))
w_scale = numpy.array([0.00172794575], dtype=numpy.float32)
w_zero_point = numpy.array([255], dtype=numpy.uint8)
w = numpy.array([0], dtype=numpy.uint8).reshape((1, 1, 1, 1))
y_scale = numpy.float32(0.00162681262)
y_zero_point = numpy.uint8(123)
y = numpy.array(
[[0, 81, 93, 230, 52, 87, 197],
[240, 196, 18, 160, 126, 255, 191],
[199, 13, 102, 34, 87, 243, 89],
[23, 77, 69, 60, 18, 93, 18],
[67, 216, 131, 178, 175, 153, 212],
[128, 25, 234, 172, 214, 215, 121],
[0, 101, 163, 114, 213, 107, 8], ],
dtype=numpy.uint8).reshape((1, 1, 7, 7))
test_qlinear_conv(
QuantizedTensor(x, x_scale, x_zero_point), (1, 1, 7, 7),
QuantizedTensor(w, w_scale, w_zero_point), (1, 1, 1, 1),
None,
QuantizedTensor(y, y_scale, y_zero_point), (1, 1, 7, 7))
@wraplog()
def test_onnxt_runtime_qlinear_conv_2dtest(self):
x = QuantizedTensor(numpy.array([
0.45246148109436035, 0.15498268604278564, 0.11199361085891724, -0.39421093463897705,
0.2626858949661255, 0.13414543867111206, -
0.27184486389160156, -0.43028733134269714,
-0.26825493574142456, 0.3893144130706787, -
0.13631996512413025, -0.009590476751327515,
-0.48771554231643677, -0.25256502628326416, -
0.2812897562980652, 0.4043201804161072,
0.07795023918151855, 0.326981782913208, 0.13114392757415771, -0.4416425824165344,
0.12446999549865723, 0.36739975214004517, 0.1698915958404541, 0.2008744478225708,
0.23339951038360596, 0.38613730669021606, 0.11117297410964966, 0.3877097964286804,
0.20812749862670898, -0.34297940135002136, -
0.029246658086776733, -0.20483523607254028,
-0.19244328141212463, -0.11104947328567505, -
0.32830488681793213, -0.01800677180290222,
0.3618946671485901, -0.40949052572250366, -
0.18248388171195984, -0.3349453806877136,
-0.34091079235076904, 0.006497859954833984, 0.4537564516067505, 0.08006560802459717,
-0.14788749814033508, 0.034442365169525146, -
0.33322954177856445, 0.06049239635467529,
0.42619407176971436], dtype=numpy.float32))
w = QuantizedTensor(numpy.array(
[-0.4406261742115021], dtype=numpy.float32))
y = QuantizedTensor(numpy.array([
-0.19936637580394745, -0.06828942894935608, -
0.04934731498360634, 0.17369966208934784,
-0.11574628204107285, -0.05910799279808998, 0.1197819635272026, 0.18959586322307587,
0.1182001456618309, -0.17154212296009064, 0.06006614491343498, 0.0042258151806890965,
0.21490024030208588, 0.11128675937652588, 0.12394362688064575, -0.17815405130386353,
-0.034346915781497955, -0.14407673478126526, -
0.05778544768691063, 0.19459928572177887,
-0.05484473705291748, -0.16188594698905945, -
0.07485868036746979, -0.08851054310798645,
-0.10284193605184555, -0.17014220356941223, -
0.04898572340607643, -0.17083507776260376,
-0.09170642495155334, 0.1511256992816925, 0.012886842712759972, 0.09025576710700989,
0.08479554951190948, 0.0489313043653965, 0.14465972781181335, 0.007934254594147205,
-0.15946026146411896, 0.1804322451353073, 0.08040717244148254, 0.1475857049226761,
0.15021422505378723, -0.0028631272725760937, -
0.19993697106838226, -0.03527900204062462,
0.06516310572624207, -0.015176207758486271, 0.14682966470718384, -0.02665453404188156,
-0.18779225647449493], dtype=numpy.float32))
test_qlinear_conv(x, (1, 1, 7, 7), w, (1, 1, 1, 1),
None, y, (1, 1, 7, 7),
opset=10)
@wraplog()
def test_onnxt_runtime_qlinear_conv_3dtest(self):
x = QuantizedTensor(numpy.array([
0.010772407054901123, -0.43806642293930054, 0.455391526222229, -0.28657248616218567,
0.45676887035369873, -0.0320507287979126, 0.4229400157928467, -0.18730869889259338,
-0.45851585268974304, 0.042054951190948486, -
0.13332295417785645, -0.25374430418014526,
-0.23845627903938293, 0.12214112281799316, -
0.1778157651424408, 0.1891845464706421,
0.37962496280670166, -0.033982306718826294, 0.12737131118774414, -0.040284961462020874,
0.46427029371261597, -0.22687292098999023, 0.17398333549499512, -0.3014046251773834,
-0.4043419063091278, -0.33206477761268616, 0.04655301570892334, -0.4947906732559204,
0.0755157470703125, 0.1173025369644165, 0.47043120861053467, 0.4824737310409546,
-0.37734976410865784, -0.056491583585739136, -
0.10790631175041199, 0.043476223945617676,
0.24469023942947388, -0.4100031852722168, 0.0616222620010376, 0.2296960949897766,
0.27883386611938477, 0.08150351047515869, 0.2453773021697998, 0.08250969648361206,
-0.1471814215183258, -0.43011274933815, 0.027180075645446777, 0.3605625033378601,
0.24954384565353394, -0.22505927085876465, -
0.36272895336151123, -0.47674262523651123,
0.11275297403335571, 0.49773406982421875, 0.2686365246772766, 0.025525271892547607,
-0.3037869930267334, 0.41126757860183716, 0.36149072647094727, 0.00883406400680542,
-0.07959523797035217, 0.3601323366165161, 0.17322391271591187, -0.012007325887680054], dtype=numpy.float32))
w = QuantizedTensor(numpy.array(
[0.32824617624282837], dtype=numpy.float32))
y = QuantizedTensor(numpy.array([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0035360013134777546, 0.14948052167892456, 0.0,
0.0, -0.15050607919692993, -0.043762750923633575, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, -0.12386361509561539, -0.03541983291506767, 0.0,
0.0, 0.09152615070343018, 0.08054415881633759, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=numpy.float32))
test_qlinear_conv(x, (1, 1, 4, 4, 4), w, (1, 1, 1, 1, 1),
None, y, (1, 1, 4, 4, 4),
opset=10,
pads=[2, 2, 2, 2, 2, 2],
strides=[2, 2, 2])
@wraplog()
def test_onnxt_runtime_qlinear_conv_2dtest_with_bias(self):
x = QuantizedTensor(numpy.array([
6, 81, 214, 151, 234, 42, 50, 89, 30, 91, 125, 141, 52, 31, 58, 224, 84, 251, 67, 137,
223, 119, 79, 220, 249, 75, 131, 246, 113, 56, 54, 197, 110, 142, 126, 171, 53, 228,
240, 83, 229, 218, 185, 9, 80, 116, 176, 193, 175, 253], dtype=numpy.uint8),
0.01, 135)
w = QuantizedTensor(numpy.array([
234, 229, 13, 187, 98, 161, 246, 188, 252, 107, 49, 72, 53, 212, 175, 47, 21, 14, 86,
230, 16, 177, 82, 166, 75, 220, 169, 119, 34, 205, 27, 9, 44, 74, 40, 8, 28, 139, 240,
106, 63, 2, 255, 156, 128, 222, 73, 51, 66, 48, 81, 247, 180, 91, 206, 239, 190, 146,
227, 235, 10, 130, 95, 232, 121, 133, 231, 162, 108, 105, 254, 143], dtype=numpy.uint8),
0.15, 110)
y = QuantizedTensor(numpy.array([
67, 81, 66, 75, 71, 101, 20, 8, 44, 94, 83, 73, 133, 125, 54, 144, 165, 56, 53, 88,
130, 118, 170, 168, 140, 109, 103, 80, 122, 142, 129, 100, 39, 61, 141, 133, 59, 155,
68, 129, 74, 132, 83, 143, 146, 152, 81, 127, 82, 112, 131, 64, 82, 68, 93, 149, 146,
137, 201, 118, 112, 183, 171, 144, 85, 122, 86, 63, 163, 245, 95, 152, 126, 80, 82,
49, 136, 160, 187, 147, 29, 20, 135, 174, 126, 124, 36, 56, 0, 83, 134, 171, 119, 109,
85, 155, 157, 167, 194, 130], dtype=numpy.uint8), 0.75, 121)
b = QuantizedBiasTensor(
numpy.array([-1123, 3212, 1723, -621], dtype=numpy.int32),
x.scale_ * w.scale_)
test_qlinear_conv(x, (1, 2, 5, 5), w, (4, 2, 3, 3),
b, y, (1, 4, 5, 5),
opset=10,
pads=[1, 1, 1, 1])
@wraplog()
def test_onnxt_runtime_qlinear_conv_2dtest_with_group(self):
x = QuantizedTensor(numpy.array([
98, 166, 219, 195, 46, 97, 27, 211, 239, 1, 28, 208, 143, 144, 215, 252, 79, 5, 154,
56, 122, 191, 94, 25, 221, 48, 37, 182, 68, 245, 210, 206, 183, 22, 163, 104, 242,
112, 161, 66, 181, 235, 117, 75, 236, 61, 115, 36, 120, 253, 165, 214, 159, 132, 11,
201, 30, 249, 89, 171, 186, 67, 225, 197, 135, 142, 241, 169, 170, 164, 178, 58, 50,
51, 200, 43, 199, 126, 222, 123, 227, 42, 3, 21, 124, 220, 24, 47, 63, 110], dtype=numpy.uint8),
0.01, 135)
w = QuantizedTensor(numpy.array([
220, 111, 73, 254, 235, 151, 6, 156, 129, 204, 234, 198, 44, 89, 202, 82, 118, 189,
71, 120, 123, 121, 110, 83, 173, 248, 108, 229, 124, 68, 85, 239, 133, 213, 112, 122,
170, 231, 225, 195, 192, 9, 232, 97, 160, 227, 67, 137], dtype=numpy.uint8),
0.15, 110)
y = QuantizedTensor(numpy.array([
113, 128, 70, 64, 125, 162, 80, 189, 112, 147, 121, 111, 96, 68, 94, 101, 77, 88, 223,
128, 163, 194, 138, 164, 122, 109, 117, 91, 72, 121, 134, 155, 127, 125, 98, 128], dtype=numpy.uint8),
0.75, 121)
b = QuantizedBiasTensor(
numpy.array([-1853, 598, -17854, 14592, 42, -366],
dtype=numpy.int32),
x.scale_ * w.scale_)
test_qlinear_conv(x, (1, 6, 3, 5), w, (6, 2, 2, 2),
b, y, (1, 6, 2, 3),
opset=10,
pads=[0, 0, 1, 1],
group=3,
strides=[2, 2])
@wraplog()
def test_onnxt_runtime_quantize_linear(self):
X = numpy.array([[[[-162, 10], [-100, 232], [-20, -50]],
[[-76, 0], [0, 252], [32, -44]],
[[245, -485], [-960, -270], [-375, -470]], ], ],
dtype=numpy.float32)
y_scale = numpy.array([2, 4, 5], dtype=numpy.float32)
y_zero_point = numpy.array([84, 24, 196], dtype=numpy.uint8)
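# Per-channel quantization: scale and zero-point apply along axis 1, hence
# the (1, 3, 1, 1) reshape for broadcasting; the inputs are chosen so that
# X / y_scale is exact and rounding does not matter before the uint8 cast.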
exp = ((X / y_scale.reshape((1, 3, 1, 1)) +
y_zero_point.reshape((1, 3, 1, 1))).astype(numpy.uint8))
onx = OnnxQuantizeLinear(
'X', y_scale, y_zero_point, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
self.common_expected_shapes_types(
oinf, {'X': X}, got, OnnxQuantizeLinear, model_def)
X = numpy.array([0, 2, 4, 1000, -254, -1000]).astype(numpy.float32)
y_scale = numpy.array([2], dtype=numpy.float32)
y_zero_point = numpy.array([128], dtype=numpy.uint8)
exp = numpy.array([128, 129, 130, 255, 1, 0]).astype(numpy.uint8)
onx = OnnxQuantizeLinear(
'X', y_scale, y_zero_point, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['Y'])
python_tested.append(OnnxQuantizeLinear)
@wraplog()
def test_onnxt_runtime_range(self):
starts = numpy.array([0], dtype=numpy.float32)
ends = numpy.array([10], dtype=numpy.float32)
steps = numpy.array([4], dtype=numpy.float32)
onx = OnnxRange(
'starts', 'ends', steps, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'starts': starts, 'ends': ends},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
exp = numpy.array([0, 4, 8], dtype=numpy.float32)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time, os
import fabio
import pyopencl as cl
import pyopencl.array  # required so that cl.array.to_device is available
from pylab import *
from pyFAI.third_party import six
print("#"*50)
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from pyFAI.test.utilstest import UtilsTest
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import splitPixelFull
from pyFAI import ocl_hist_pixelsplit
# from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxCSR
os.chdir("testimages")
ai = pyFAI.load("halfccd.poni")
data = fabio.open("halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg", scale=False)
pos = pos_in.reshape(pos_in.size // 8, 4, 2)
pos_size = pos.size
size = data.size
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.Buffer(ctx, mf.READ_WRITE, 4 * 4 * workgroup_size)
d_minmax = cl.Buffer(ctx, mf.READ_WRITE, 4 * 4)
with open("../../openCL/ocl_hist_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%f" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult, d_minmax)
result = numpy.ndarray(4, dtype=numpy.float32)
cl.enqueue_copy(queue, result, d_minmax)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax = (min0, max0, min1, max1)
print(minmax)
print(result)
d_outData = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)
d_outCount = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)
d_outMerge = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)
program.memset_out(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue, outData, d_outData)
cl.enqueue_copy(queue, outCount, d_outCount)
cl.enqueue_copy(queue, outMerge, d_outMerge)
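# Round the global work size up to the next multiple of workgroup_size; the
# bit trick below is valid because workgroup_size (256) is a power of two.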
global_size = (data.size + workgroup_size - 1) & ~(workgroup_size - 1),
d_image = cl.array.to_device(queue, data)
d_image_float = cl.Buffer(ctx, mf.READ_WRITE, 4 * size)
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # halfccd
program.integrate1(queue, global_size, (workgroup_size,), d_pos.data, d_image_float, d_minmax, numpy.int32(data.size))
#!/usr/bin/env python3
import numpy as np
from numpy import linalg
import matplotlib.pyplot as plt
import rospy
import sys
import cv2
import glob
import time
import actionlib
from cv_bridge import CvBridge
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal, JointTrajectoryControllerState, FollowJointTrajectoryActionGoal
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from sensor_msgs.msg import Image
#import roslib
import tf
#import geometry_msgs.msg
import tf2_msgs.msg
#import turtlesim.srv
import tensorflow as tf2
import tensorflow_hub as hub
from openpose.body.estimator import BodyPoseEstimator
from openpose.utils import draw_body_connections, draw_keypoints
import cmath
import math
from math import cos, sin, atan2, acos, asin, sqrt, pi
#from mpl_toolkits.mplot3d import Axes3D
global tmp1, tmp2
global points1
global points2
global points3
global points4
global points5
global points6
global points7
global points8
global points9
global points10
global points11
global points12
global height_limiter
global angle1, angle2, angle3
global angle4, angle5, angle6
global image_height
global tmp5, tmp6
global fulltime1, fulltime2
tmp5=0
tmp6=0
fulltime1=0
fulltime2=0
#CALIBRATION FUNCTIONS
def calibrate_camera(images_folder):
images_names = glob.glob(images_folder)
images = []
for imname in images_names:
im = cv2.imread(imname, 1)
images.append(im)
#criteria used by checkerboard pattern detector.
#Change this if the code can't find the checkerboard
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
rows = 9 #number of checkerboard rows.
columns = 13 #number of checkerboard columns.
world_scaling = 1. #change this to the real world square size. Or not.
#coordinates of squares in the checkerboard world space
objp = np.zeros((rows*columns,3), np.float32)
objp[:,:2] = np.mgrid[0:rows,0:columns].T.reshape(-1,2)
objp = world_scaling* objp
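# np.mgrid enumerates integer (row, col) corner positions; flattened, they
# become the planar (z = 0) checkerboard coordinates, scaled to square size.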
#frame dimensions. Frames should be the same size.
width = images[0].shape[1]
height = images[0].shape[0]
#Pixel coordinates of checkerboards
imgpoints = [] # 2d points in image plane.
#coordinates of the checkerboard in checkerboard world space.
objpoints = [] # 3d point in real world space
for frame in images:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#find the checkerboard
ret, corners = cv2.findChessboardCorners(gray, (rows, columns), None)
if ret == True:
#Convolution size used to improve corner detection. Don't make this too large.
conv_size = (11, 11)
#opencv can attempt to improve the checkerboard coordinates
corners = cv2.cornerSubPix(gray, corners, conv_size, (-1, -1), criteria)
cv2.drawChessboardCorners(frame, (rows,columns), corners, ret)
cv2.imshow('img', frame)
cv2.waitKey(500)
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (width, height), None, None)
print('rmse:', ret)
print('camera matrix:\n', mtx)
print('distortion coeffs:', dist)
print('Rs:\n', rvecs)
print('Ts:\n', tvecs)
return mtx, dist
def stereo_calibrate(mtx1, dist1, mtx2, dist2):
#read the synched frames
c1_images_names = glob.glob('leftCalib/*')
c1_images_names = sorted(c1_images_names)
c2_images_names = glob.glob('rightCalib/*')
c2_images_names = sorted(c2_images_names)
c1_images = []
c2_images = []
for im1, im2 in zip(c1_images_names, c2_images_names):
_im = cv2.imread(im1, 1)
c1_images.append(_im)
_im = cv2.imread(im2, 1)
c2_images.append(_im)
#change this if stereo calibration not good.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
rows = 9 #number of checkerboard rows.
columns = 13 #number of checkerboard columns.
world_scaling = 1. #change this to the real world square size. Or not.
#coordinates of squares in the checkerboard world space
objp = np.zeros((rows*columns,3), np.float32)
objp[:,:2] = np.mgrid[0:rows,0:columns].T.reshape(-1,2)
objp = world_scaling* objp
#frame dimensions. Frames should be the same size.
width = c1_images[0].shape[1]
height = c1_images[0].shape[0]
#Pixel coordinates of checkerboards
imgpoints_left = [] # 2d points in image plane.
imgpoints_right = []
#coordinates of the checkerboard in checkerboard world space.
objpoints = [] # 3d point in real world space
for frame1, frame2 in zip(c1_images, c2_images):
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
c_ret1, corners1 = cv2.findChessboardCorners(gray1, (9, 13), None)
c_ret2, corners2 = cv2.findChessboardCorners(gray2, (9, 13), None)
if c_ret1 == True and c_ret2 == True:
corners1 = cv2.cornerSubPix(gray1, corners1, (11, 11), (-1, -1), criteria)
corners2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), criteria)
cv2.drawChessboardCorners(frame1, (9,13), corners1, c_ret1)
cv2.imshow('img', frame1)
cv2.drawChessboardCorners(frame2, (9,13), corners2, c_ret2)
cv2.imshow('img2', frame2)
cv2.waitKey(500)
objpoints.append(objp)
imgpoints_left.append(corners1)
imgpoints_right.append(corners2)
stereocalibration_flags = cv2.CALIB_FIX_INTRINSIC
ret, CM1, dist1, CM2, dist2, R, T, E, F = cv2.stereoCalibrate(objpoints, imgpoints_left, imgpoints_right, mtx1, dist1, mtx2, dist2, (width, height), criteria = criteria, flags = stereocalibration_flags)
print(ret)
return R, T
def DLT(P1, P2, point1, point2):
A = [point1[1]*P1[2,:] - P1[1,:],
P1[0,:] - point1[0]*P1[2,:],
point2[1]*P2[2,:] - P2[1,:],
P2[0,:] - point2[0]*P2[2,:]
]
A = np.array(A).reshape((4,4))
#print('A: ')
#print(A)
B = A.transpose() @ A
from scipy import linalg
U, s, Vh = linalg.svd(B, full_matrices = False)
print('Triangulated point: ')
print(Vh[3,0:3]/Vh[3,3])
return Vh[3,0:3]/Vh[3,3]
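# Minimal usage sketch (hypothetical pixel coordinates; P1/P2 are the
# projection matrices built below from the calibration results):
# uv1 = np.array([320.0, 240.0])
# uv2 = np.array([310.0, 250.0])
# xyz = DLT(P1, P2, uv1, uv2)  # triangulated 3D point in camera-1 frame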
mtx1, dist1 = calibrate_camera(images_folder = 'leftCalib/*')
mtx2, dist2 = calibrate_camera(images_folder = 'rightCalib/*')
R, T = stereo_calibrate(mtx1, dist1, mtx2, dist2)
#RT matrix for C1 is identity.
RT1 = np.concatenate([np.eye(3), [[0],[0],[0]]], axis = -1)
P1 = mtx1 @ RT1 #projection matrix for C1
#RT matrix for C2 is the R and T obtained from stereo calibration.
RT2 = np.concatenate([R, T], axis = -1)
P2 = mtx2 @ RT2 #projection matrix for C2
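# Each projection matrix maps homogeneous world points to homogeneous
# pixels: s * [u, v, 1]^T = P @ [X, Y, Z, 1]^T, with P = K @ [R|T].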
#KEYPOINTS DETECTION
#loading model
rospy.loginfo('Loading model')
estimator = BodyPoseEstimator(pretrained=True)
model = hub.load('https://tfhub.dev/google/movenet/singlepose/thunder/4')
#model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')
movenet = model.signatures['serving_default']
rospy.loginfo('Model loaded')
#Callbacks containing images from cameras detecting keypoints on given image
bridge = CvBridge()
def process_image_cam1(msg):
time_movecam1 = time.time()
global tmp1, tmp3, tmp5, fulltime1
global points2
global points4
global points6
global points8
global points10
global points12
global image_height
img = bridge.imgmsg_to_cv2(msg, "bgr8")
'''
h1, w1 = img.shape[:2]
print("wymiary kamery cam1:")
print(w1)
print(h1)
print("koniec kamery cam1:")
'''
y, x, _ = img.shape
image_height = y
# A frame of video or an image, represented as an int32 tensor of shape: 256x256x3. Channels order: RGB with values in [0, 255].
tf_img = cv2.resize(img, (256,256))
tf_img = cv2.cvtColor(tf_img, cv2.COLOR_BGR2RGB)
tf_img = np.asarray(tf_img)
tf_img = np.expand_dims(tf_img,axis=0)
# Resize and pad the image to keep the aspect ratio and fit the expected size.
image = tf2.cast(tf_img, dtype=tf2.int32)
# Run model inference.
outputs = movenet(image)
# Output is a [1, 1, 17, 3] tensor.
keypoints = outputs['output_0']
#keypoints = keypoints.numpy()
#print(keypoints)
keypoints2 = keypoints.numpy()
shoulder_y = int(keypoints2[0, 0, 6, 0]* y)
shoulder_x = int( keypoints2[0, 0, 6, 1]* x)
elbow_y = int(keypoints2[0, 0, 8, 0]* y)
elbow_x = int(keypoints2[0, 0, 8, 1]* x)
wrist_y = int(keypoints2[0, 0, 10, 0]* y)
wrist_x = int(keypoints2[0, 0, 10, 1]* x)
hand1_y = int(keypoints2[0, 0, 5, 0]* y)
hand1_x = int( keypoints2[0, 0, 5, 1]* x)
hand2_y = int(keypoints2[0, 0, 7, 0]* y)
hand2_x = int(keypoints2[0, 0, 7, 1]* x)
hand3_y = int(keypoints2[0, 0, 9, 0]* y)
hand3_x = int(keypoints2[0, 0, 9, 1]* x)
#keypoints = estimator(img)
points2=np.array([shoulder_x,shoulder_y])
points4=np.array([elbow_x,elbow_y])
points6=np.array([wrist_x,wrist_y])
points8 = np.array([hand1_x, hand1_y])
points10 = np.array([hand2_x, hand2_y])
points12 = np.array([hand3_x, hand3_y])
#points2=np.array([keypoints[0][2][0],keypoints[0][2][1]])
#points4=np.array([keypoints[0][3][0],keypoints[0][3][1]])
#points6=np.array([keypoints[0][4][0],keypoints[0][4][1]])
'''
print("keypoints:")
print(keypoints[0][2][0]) # x shoudler
print(keypoints[0][2][1]) # y
print(keypoints[0][3][0]) # x elbow
print(keypoints[0][3][1]) # y
print(keypoints[0][4][0]) # x wrist
print(keypoints[0][4][1]) # y
'''
#image_dst = draw_body_connections(img, keypoints, thickness=4, alpha=0.7)
#image_dst = draw_keypoints(image_dst, keypoints, radius=5, alpha=0.8)
for k in keypoints[0,0,:,:]:
# Converts to numpy array
k = k.numpy()
yc = int(k[0] * y)
xc = int(k[1] * x)
image_dst = cv2.circle(img, (xc, yc), 2, (0, 255, 0), 5)
image_message = bridge.cv2_to_imgmsg(image_dst, "bgr8")
#print("publikacja cam1")
image_pub_cam1.publish(image_message)
image_message = 0
if points2[0]!=0.0 and points2[1]!=0.0 and points4[0]!=0.0 and points4[1]!=0.0 and points6[0]!=0.0 and points6[1]!=0.0:
tmp1 = 1
time_movecam11 = time.time()
time_interval_cam1 = time_movecam11 - time_movecam1
if tmp5 < 200:
fulltime1= fulltime1+time_interval_cam1
tmp5=tmp5+1
elif tmp5 == 200:
print(fulltime1)
#print("camera 1 feedback time:")
#print(time_interval_cam1)
tmp3=1
def process_image_cam2(msg):
global angle1, angle2, angle3
global angle4, angle5, angle6
global points1
global points2
global points3
global points4
global points5
global points6
global points7
global points8
global points9
global points10
global points11
global points12
time_movecam2 = time.time()
global tmp2, tmp4, tmp1, tmp6, fulltime2
img = bridge.imgmsg_to_cv2(msg, "bgr8")
scale_percent = 44.5 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
img = img[:, 107:747]
y, x, _ = img.shape
# A frame of video or an image, represented as an int32 tensor of shape: 256x256x3. Channels order: RGB with values in [0, 255].
tf_img = cv2.resize(img, (256,256))
tf_img = cv2.cvtColor(tf_img, cv2.COLOR_BGR2RGB)
tf_img = np.asarray(tf_img)
tf_img = np.expand_dims(tf_img,axis=0)
# Resize and pad the image to keep the aspect ratio and fit the expected size.
image = tf2.cast(tf_img, dtype=tf2.int32)
# Run model inference.
outputs = movenet(image)
# Output is a [1, 1, 17, 3] tensor.
keypoints = outputs['output_0']
#keypoints = keypoints.numpy()
#print(keypoints)
keypoints2 = keypoints.numpy()
shoulder_y = int(keypoints2[0, 0, 6, 0]* y)
shoulder_x = int( keypoints2[0, 0, 6, 1]* x)
elbow_y = int(keypoints2[0, 0, 8, 0]* y)
elbow_x = int(keypoints2[0, 0, 8, 1]* x)
wrist_y = int(keypoints2[0, 0, 10, 0]* y)
wrist_x = int(keypoints2[0, 0, 10, 1]* x)
hand1_y = int(keypoints2[0, 0, 5, 0]* y)
hand1_x = int( keypoints2[0, 0, 5, 1]* x)
hand2_y = int(keypoints2[0, 0, 7, 0]* y)
hand2_x = int(keypoints2[0, 0, 7, 1]* x)
hand3_y = int(keypoints2[0, 0, 9, 0]* y)
hand3_x = int(keypoints2[0, 0, 9, 1]* x)
#keypoints = estimator(img)
points1=np.array([shoulder_x,shoulder_y])
points3=np.array([elbow_x,elbow_y])
points5=np.array([wrist_x,wrist_y])
points7 = np.array([hand1_x, hand1_y])
points9 = np.array([hand2_x, hand2_y])
points11 = np.array([hand3_x, hand3_y])
#points1=np.array([keypoints[0][2][0],keypoints[0][2][1]])
#points3=np.array([keypoints[0][3][0],keypoints[0][3][1]])
#points5=np.array([keypoints[0][4][0],keypoints[0][4][1]])
'''
print("keypoints:")
print(keypoints[0][2][0]) # x shoudler
print(keypoints[0][2][1]) # y
print(keypoints[0][3][0]) # x elbow
print(keypoints[0][3][1]) # y
print(keypoints[0][4][0]) # x wrist
print(keypoints[0][4][1]) # y
'''
#image_dst = draw_body_connections(img, keypoints, thickness=4, alpha=0.7)
#image_dst = draw_keypoints(image_dst, keypoints, radius=5, alpha=0.8)
for k in keypoints[0,0,:,:]:
# Converts to numpy array
k = k.numpy()
yc = int(k[0] * y)
xc = int(k[1] * x)
image_dst = cv2.circle(img, (xc, yc), 2, (0, 255, 0), 5)
image_message = bridge.cv2_to_imgmsg(image_dst, "bgr8")
#print("publikacja cam2")
image_pub_cam2.publish(image_message)
image_message = 0
if points1[0]!=0.0 and points1[1]!=0.0 and points3[0]!=0.0 and points3[1]!=0.0 and points5[0]!=0.0 and points5[1]!=0.0:
tmp2=1
time_movecam22 = time.time()
time_interval_cam2 = time_movecam22 - time_movecam2
print("iteration: ")
print(tmp6)
if tmp6 < 1000:
fulltime2= fulltime2+time_interval_cam2
tmp6=tmp6+1
elif tmp6 == 1000:
print(fulltime2)
#print("camera 2 feedback time:")
#print(time_interval_cam2)
tmp4=1
print("Conditions for triangulation check:")
print(tmp1)
print(tmp2)
#p3ds = []
if tmp1!=0 and tmp2!=0:
p_shoulder = DLT(P1, P2, points1, points2)
p_elbow = DLT(P1, P2, points3, points4)
p_wrist = DLT(P1, P2, points5, points6)
p_hand1 = DLT(P1, P2, points7, points8)
p_hand2 = DLT(P1, P2, points9, points10)
p_hand3 = DLT(P1, P2, points11, points12)
x1=p_elbow[0]-p_shoulder[0]
y1=p_elbow[1]-p_shoulder[1]
z1=p_elbow[2]-p_shoulder[2]
x2=p_wrist[0]-p_elbow[0]
y2=p_wrist[1]-p_elbow[1]
z2=p_wrist[2]-p_elbow[2]
x3=p_hand2[0]-p_hand1[0]
y3=p_hand2[1]-p_hand1[1]
z3=p_hand2[2]-p_hand1[2]
x4=p_hand3[0]-p_hand2[0]
y4=p_hand3[1]-p_hand2[1]
z4=p_hand3[2]-p_hand2[2]
#fig = plt.figure(figsize=(4,4))
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(p_shoulder[0], p_shoulder[1], p_shoulder[2], color='red')
#ax.scatter(p_elbow[0], p_elbow[1], p_elbow[2], color='blue')
#ax.scatter(p_wrist[0], p_wrist[1], p_wrist[2], color='green')
#plt.show()
angle1=asin(y1/sqrt((x1*x1)+(y1*y1)+(z1*z1)))
if x1 != 0:
    angle2 = atan2(z1, x1)
else:
    angle2 = pi/2
angle3=asin(y2/sqrt((x2*x2)+(y2*y2)+(z2*z2)))
angle4 = asin(z3 / sqrt((x3 * x3) + (y3 * y3) + (z3 * z3)))
if x1 > 0:
angle5 = atan2(y3, x3)
elif x1 < 0:
angle5 = atan2(y3, x3) + pi
else:
angle5 = pi / 2
angle6 = asin(z4 / sqrt((x4 * x4) + (y4 * y4) + (z4 * z4)))
angle1 = -angle1
angle3 = -angle3
angle5 = angle5 / 1.5
print("Calculated angles for the move order:")
print(angle1)
print(angle2)
print(angle3)
print(angle4)
print(angle5)
print(angle6)
tmp1=0
tmp2=0
ArmSimpleTrajectory()
#UR5 CONTROL
#[moveA1, moveA2,moveA3,moveA4,moveA5,moveA6]=[2.9, -3.14, 0.0, -1.57, 1.5, -1.57]
[moveA1, moveA2,moveA3,moveA4,moveA5,moveA6]=[0.0, pi/2, 0.0, 0.0, 0.0, 0.0]
class ArmSimpleTrajectory:
def __init__(self):
time_move1 = time.time()
global moveA1,moveA2,moveA3,moveA4,moveA5,moveA6,height_limiter, position_limiter_x, position_limiter_y
global angle1, angle2, angle3, angle4, angle5, angle6
# UR5 joint names
arm_joints = ['right_arm_shoulder_pan_joint',
'right_arm_shoulder_lift_joint',
'right_arm_elbow_joint',
'right_arm_wrist_1_joint',
'right_arm_wrist_2_joint',
'right_arm_wrist_3_joint']
if height_limiter<0.9 :
print("Danger! Tcp is too close to base! Returning to starting position...")
[angle1, angle2, angle3,moveA4,moveA5,moveA6]=[0.0, 0.0, 0.0, -1.57, 1.5, -1.57]
if position_limiter_x < 0.1 and position_limiter_y > -0.4 :
print("Danger! Tcp is too close to base! Returning to starting position...")
[angle1, angle2,angle3,moveA4,moveA5,moveA6]=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
order1 = moveA1+angle1
order2 = angle2
order3 = moveA3+angle3
if order1 > pi:
order1 = moveA1+angle1-2*pi
elif order1 < -pi:
order1 = moveA1+angle1+2*pi
else :
order1 = moveA1+angle1
if order2 > pi:
order2 = angle2-2*pi
elif order2 < -pi:
order2 = angle2+2*pi
else :
order2 = angle2
if order3 > pi:
order3 = angle3-2*pi
elif order3 < -pi:
order3 = angle3+2*pi
else :
order3 = angle3
if order1 > 3.5:
order1 = 3.5
if order1 < -1.25:
order1 = -1.25
arm_goal = [order1, order2, order3, moveA4+angle4, moveA5+angle5, moveA6+angle6]
# Connect to the right arm trajectory action server
#rospy.loginfo('Waiting for ur arm trajectory controller...')
#arm_client = actionlib.SimpleActionClient('arm_controller/scaled_pos_joint_traj_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
#arm_client.wait_for_server()
#rospy.loginfo('...connected.')
#rospy.sleep(1)
# Create a single-point arm trajectory with the arm_goal as the end-point
arm_trajectory = JointTrajectory()
arm_trajectory.joint_names = arm_joints
arm_trajectory.points.append(JointTrajectoryPoint())
arm_trajectory.points[0].positions = arm_goal
arm_trajectory.points[0].velocities = [0.0 for i in arm_joints]
arm_trajectory.points[0].accelerations = [0.0 for i in arm_joints]
arm_trajectory.points[0].time_from_start = rospy.Duration(3)
# Send the trajectory to the arm action server
#rospy.loginfo('Moving the arm to goal position...')
#rospy.sleep(1)
# Create an empty trajectory goal
arm_goal = FollowJointTrajectoryGoal()
# Set the trajectory component to the goal trajectory created above
arm_goal.trajectory = arm_trajectory
# Specify zero tolerance for the execution time
arm_goal.goal_time_tolerance = rospy.Duration(0)
arm_goal_action = FollowJointTrajectoryActionGoal()
arm_goal_action.goal = arm_goal
time_move2 = time.time()
rospy.loginfo("Moving an arm now")
arm_goal_pub.publish(arm_goal_action)
#rospy.sleep(1)
time_move3 = time.time()
time_interval_move1 = time_move2 - time_move1
#print("create order time:")
#print(time_interval_move1)
time_interval_move2 = time_move3 - time_move2
#print("send order time:")
#print(time_interval_move2)
#Main loop
itmp=0
image_pub_cam1 = rospy.Publisher("keypoints_image_cam1",Image, queue_size=1000)
image_pub_cam2 = rospy.Publisher("keypoints_image_cam2",Image, queue_size=1000)
arm_goal_pub = rospy.Publisher("/arm_controller/scaled_pos_joint_traj_controller/follow_joint_trajectory/goal",FollowJointTrajectoryActionGoal ,queue_size=1)
rospy.init_node('teleop_arm')
rospy.loginfo('teleop_arm node started')
if __name__ == '__main__':
global tmp1, tmp2, tmp3, tmp4
global angle1, angle2, angle3
global angle4, angle5, angle6
[angle1, angle2, angle3]=[0.0, 0.0, 0.0]
[angle4, angle5, angle6] = [0.0, 0.0, 0.0]
global points1
global points2
global points3
global points4
global points5
global points6
global points7
global points8
global points9
global points10
global points11
global points12
points6=np.array([0, 0])
points5=np.array([0, 0])
points1=np.array([0, 0])
points2=np.array([0, 0])
points3=np.array([0, 0])
points4=np.array([0, 0])
points7=np.array([0, 0])
points8 = np.array([0, 0])
points9 = np.array([0, 0])
points10 = np.array([0, 0])
points11 = np.array([0, 0])
points12 = np.array([0, 0])
# flags checked by the camera callbacks before triangulation
tmp1 = tmp2 = tmp3 = tmp4 = 0
# https://blog.csdn.net/weixin_30532973/article/details/97703470
# python
import math
import time
import numpy as np
import cyStdDev
def pyStdDev(a):
mean = sum(a) / len(a)
return math.sqrt((sum(((x - mean)**2 for x in a)) / len(a)))
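# Two-pass population standard deviation (mean first, then mean squared
# deviation); equivalent to np.std with the default ddof=0.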
def npStdDev(a):
return np.std(a)
def test():
num = 1000000
a1 = [float(v) for v in range(num)]
t_py = time.time()
py_result = pyStdDev(a1)
print("t_py: ", time.time()-t_py)
print("t_py result: ", py_result)
# ------------------------
print("*"*30)
a2 = np.arange(num)
t_np = time.time()
np_result = npStdDev(a2)
print("t_np: ", time.time()-t_np)
print("t_np result: ", np_result)
# ------------------------
print("*"*30)
a = np.arange(num)
t_cy = time.time()
cy_result = cyStdDev.cyStdDev(a)
print("t_cy: ", time.time()-t_cy)
print("cy result: ", cy_result)
# -----------------------
print("*"*30)
a3 = np.arange(num)
t_cy_np = time.time()
cy_np_result = cyStdDev.npStdDev(a3)
print("t_cy_np: ", time.time()-t_cy_np)
print("cy_np result: ", cy_np_result)
# ------------------------
print("*"*30)
a4 = np.arange(num)
"""General functions for mathematical and numerical operations.
Functions
---------
- spline - Create a general spline interpolation function.
- cumtrapz_loglog - Perform a cumulative integral in log-log space.
- extend - Extend the given array by extraplation.
- sampleInverse - Find x-sampling to evenly divide a function in y-space.
- smooth - Use convolution to smooth the given array.
- _trapezium_loglog -
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# import scipy as sp
import scipy.stats # noqa
import warnings
from . import math_core, interpolate # , statistic
__all__ = [
'cumtrapz_loglog', 'even_selection', 'extend', 'monotonic_smooth', 'rk4_step',
'sample_inverse', 'smooth_convolve', 'spline',
# DEPRECATED
'smooth', '_smooth'
]
def cumtrapz_loglog(yy, xx, bounds=None, axis=-1, dlogx=None, lntol=1e-2):
"""Calculate integral, given `y = dA/dx` or `y = dA/dlogx` w/ trapezoid rule in log-log space.
We are calculating the integral `A` given sets of values for `y` and `x`.
To associate `yy` with `dA/dx` then `dlogx = None` [default], otherwise,
to associate `yy` with `dA/dlogx` then `dlogx = True` for natural-logarithm, or `dlogx = b`
for a logarithm of base `b`.
For each interval (x[i+1], x[i]), calculate the integral assuming that y is of the form,
`y = a * x^gamma`
Notes
-----
- When bounds are given that are not identical to input `xx` values, then interpolation must
be performed. This can be done on the resulting cumsum'd values, or on the input integrand
values. The cumsum values are *not necessarily a power-law* (for negative indices), and thus
the interpolation is better performed on the input `yy` values.
"""
yy = np.asarray(yy)
xx = np.asarray(xx)
if bounds is not None:
if len(bounds) != 2 or np.any(~math_core.within(bounds, xx)) or (bounds[0] > bounds[1]):
err = "Invalid `bounds` = '{}', xx extrema = '{}'!".format(
bounds, math_core.minmax(xx))
raise ValueError(err)
if axis != -1 or np.ndim(yy) > 1:
newy = interpolate.interp_func(xx, yy, xlog=True, ylog=True)(bounds)
else:
newy = interpolate.interp(bounds, xx, yy, xlog=True, ylog=True, valid=False)
# newy = interpolate.interp(bounds, xx, yy, xlog=True, ylog=True, valid=False)
ii = np.searchsorted(xx, bounds)
xx = np.insert(xx, ii, bounds, axis=axis)
yy = np.insert(yy, ii, newy, axis=axis)
ii = np.array([ii[0], ii[1]+1])
assert np.all(xx[ii] == bounds), "FAILED!"
yy = np.ma.masked_values(yy, value=0.0, atol=0.0)
# if np.ndim(yy) > 1 and np.ndim(xx) == 1:
if np.ndim(yy) != np.ndim(xx):
if np.ndim(yy) < np.ndim(xx):
raise ValueError("BAD SHAPES")
cut = [slice(None)] + [np.newaxis for ii in range(np.ndim(yy)-1)]
xx = xx[tuple(cut)]
log_base = np.e
if dlogx is not None:
# If `dlogx` is True, then we're using log-base-e (i.e. natural-log)
# Otherwise, set the log-base to the given value
if dlogx is not True:
log_base = dlogx
# Numerically calculate the local power-law index
delta_logx = np.diff(np.log(xx), axis=axis)
gamma = np.diff(np.log(yy), axis=axis) / delta_logx
xx = np.moveaxis(xx, axis, 0)
yy = np.moveaxis(yy, axis, 0)
aa = np.mean([xx[:-1] * yy[:-1], xx[1:] * yy[1:]], axis=0)
aa = np.moveaxis(aa, 0, axis)
xx = np.moveaxis(xx, 0, axis)
yy = np.moveaxis(yy, 0, axis)
# Integrate dA/dx
# A = (x1*y1 - x0*y0) / (gamma + 1)
if dlogx is None:
dz = np.diff(yy * xx, axis=axis)
trapz = dz / (gamma + 1)
# when the power-law is (near) '-1' then, `A = a * log(x1/x0)`
idx = np.isclose(gamma, -1.0, atol=lntol, rtol=lntol)
# Integrate dA/dlogx
# A = (y1 - y0) / gamma
else:
dy = np.diff(yy, axis=axis)
trapz = dy / gamma
# when the power-law is (near) '-1' then, `A = a * log(x1/x0)`
idx = np.isclose(gamma, 0.0, atol=lntol, rtol=lntol)
trapz[idx] = aa[idx] * delta_logx[idx]
integ = np.log(log_base) * np.cumsum(trapz, axis=axis)
if bounds is not None:
# NOTE: **DO NOT INTERPOLATE INTEGRAL** this works badly for negative power-laws
# lo, hi = interpolate.interp(bounds, xx[1:], integ, xlog=True, ylog=True, valid=False)
# integ = hi - lo
integ = np.moveaxis(integ, axis, 0)
lo, hi = integ[ii-1, ...]
integ = hi - lo
return integ
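# Example sketch: for y = x**-2 the integral from 1 to x0 is 1 - 1/x0, so the
# cumulative log-log trapezoid result approaches 0.9 at x0 = 10:
# xx = np.logspace(0, 1, 100)
# yy = xx ** -2.0
# cumtrapz_loglog(yy, xx)[-1]  # ~0.9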
def even_selection(size, select, sel_is_true=True):
"""Create a boolean indexing array of length `size` with `select`, evenly spaced elements.
If `sel_is_true == True` then there are `select` True elements and the rest are False.
If `sel_is_true == False` then there are `select` False elements and the rest are True.
"""
y = True if sel_is_true else False
n = (not y)
if select > size:
raise ValueError("Cannot select {}/{} elements!".format(select, size))
if select == size:
cut = np.ones(size, dtype=bool) * y
elif select > size/2:
cut = np.ones(size, dtype=bool) * y
q, r = divmod(size, size-select)
indices = [q*i + min(i, r) for i in range(size-select)]
cut[indices] = n
else:
cut = np.ones(size, dtype=bool) * n
q, r = divmod(size, select)
indices = [q*i + min(i, r) for i in range(select)]
cut[indices] = y
return cut
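# Example sketch: even_selection(10, 4) yields a length-10 mask with 4 True
# entries spread evenly:
# [True, False, False, True, False, False, True, False, True, False]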
def extend(arr, num=1, log=True, append=False):
"""Extend the given array by extraplation.
Arguments
---------
arr <flt>[N] : array to extend
num <int> : number of points to add (on each side, if ``both``)
log <bool> : extrapolate in log-space
append <bool> : add the extended points onto the given array
Returns
-------
retval <flt>[M] : extension (or input ``arr`` with extension added, if ``append``).
"""
if(log): useArr = np.log10(arr)
else: useArr = np.array(arr)
steps = np.arange(1, num+1)
left = useArr[0] + (useArr[0] - useArr[1])*steps[::-1].squeeze()
rigt = useArr[-1] + (useArr[-1] - useArr[-2])*steps.squeeze()
if(log):
left = np.power(10.0, left)
rigt = np.power(10.0, rigt)
if(append): return np.hstack([left, arr, rigt])
return [left, rigt]
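# Example: extrapolate one point on each side, in log-space by default.
#     extend([1.0, 10.0, 100.0])               # -> [array([0.1]), array([1000.])]
#     extend([1.0, 10.0, 100.0], append=True)  # -> array([0.1, 1., 10., 100., 1000.])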
def monotonic_smooth(vals, window_size=4, expand_size=1, max_iter=10,
thresh=-0.01, details=False, **kwargs):
"""Try to smooth out non-monotonicities in the given array.
NOTE: causes some sub-optimal edge effects.
Arguments
---------
vals: (N,) scalar
Input values to smooth.
    window_size: int,
        Size of the convolution window passed to `smooth_convolve`.
expand_size: int,
Expand the region being smoothed over to include this many neighboring points,
outside of the non-monotonic region.
    max_iter: int,
        Maximum number of smoothing iterations to attempt.
thresh : scalar,
Differences between subsequent points must be less than this value to be considered
as non-monotonicities.
Returns
-------
yy: (N,) scalar
Smoothed array.
"""
if np.ndim(vals) > 1:
raise ValueError("Input array must be 1D")
yy = np.copy(vals)
# Smooth if the curve is not monotonic
bads = (np.diff(yy) < thresh)
cnt = 0
dets = []
while any(bads) and cnt < max_iter:
bads = np.where(bads)[0]
lo = bads[0]
lo = np.max([lo - expand_size, 0])
hi = bads[-1]+1
hi = np.min([hi + expand_size, yy.size + 1])
if details:
dets.append([[lo, hi], np.copy(yy[lo:hi])])
yy[lo:hi] = smooth_convolve(yy, window_size, **kwargs)[lo:hi]
bads = (np.diff(yy) < thresh)
cnt += 1
if details:
return yy, dets
return yy
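# Example (relies on `smooth_convolve`, defined elsewhere in this module):
#     vals = np.array([0.0, 1.0, 2.0, 1.5, 3.0, 4.0])   # one non-monotonic dip
#     monotonic_smooth(vals)   # -> smoothed copy with the dip ironed out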
def ndinterp(xx, xvals, yvals, xlog=True, ylog=True):
"""Interpolate 2D data to an array of points.
    `xvals` and `yvals` are (N, M); the interpolation is done along the 1st (`M`)
    axis, i.e. independently for each of the `N` rows. Should be generalizable
    to higher dimensions. NOTE: the `xlog` and `ylog` arguments are currently
    accepted but unused (the interpolation below is linear in both variables).
"""
# Convert to (N, T, M)
# `xx` is (T,) `xvals` is (N, M) for N-binaries and M-steps
select = (xx[np.newaxis, :, np.newaxis] <= xvals[:, np.newaxis, :])
# (N, T)
aft = np.argmax(select, axis=-1)
# zero values in `aft` mean no xvals after the targets were found
valid = (aft > 0)
inval = ~valid
bef = np.copy(aft)
bef[valid] -= 1
# (2, N, T)
cut = [aft, bef]
# (2, N, T)
xvals = [np.take_along_axis(xvals, cc, axis=-1) for cc in cut]
    # Find how far to interpolate between the bracketing values (a linear fraction)
# (N, T)
frac = (xx[np.newaxis, :] - xvals[1]) / np.subtract(*xvals)
# (2, N, T)
data = [np.take_along_axis(yvals, cc, axis=-1) for cc in cut]
# Interpolate by `frac` for each binary
new = data[1] + (np.subtract(*data) * frac)
# Set invalid binaries to nan
new[inval, ...] = np.nan
return new
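# Example: interpolate two independent rows at the targets [1.0, 3.0].
#     xvals = np.array([[0., 1., 2.], [0., 2., 4.]])
#     yvals = 2.0 * xvals
#     ndinterp(np.array([1.0, 3.0]), xvals, yvals)
#     # -> [[2., nan], [2., 6.]]   (3.0 lies outside the range of row 0)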
def regress(xx, yy):
"""Perform *linear* regression on the *zeroth* dimension of the given (ND) data.
Arguments
---------
xx : (N, ...) array_like of scalar
Independent variable of data.
yy : (N, ...) array_like of scalar
Dependent variable of data, with shape matching that of `xx`.
Returns
-------
coeff : (2, ...) np.ndarray of scalar
The linear regression coefficients, such that the 0th element is the slope, and the 1st is
the y-intercept. The shape of `coeff` is such that ``coeff.shape[1:] == xx.shape[1:]``.
zz : (N, ...) np.ndarray of scalar
The model/prediction values using the linear regression and the input `xx` values.
Same shape as `xx` and `yy`.
"""
if np.shape(xx) != np.shape(yy):
err = "Shape of xx ({}) does not match that of yy ({})!".format( | np.shape(xx) | numpy.shape |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from astropy import constants
from astropy.io import fits
from astropy import coordinates
from astropy import wcs
from astropy import units
import os
try:
from fpdf import FPDF
except ImportError:
    pass
def getrms(hours = 16, Nant = 60, velocity_width = 5.513, obs_freq = 1.421e9, f_tap = 1., outstring = ''):
locstring = ''
# Constants and conversion factors
    J = 1e26 # conversion factor: W m^-2 Hz^-1 to Jy (1 Jy = 1e-26 W m^-2 Hz^-1)
h1 = 1420405751.7667 # HI frequency in Hz
diameter = 13.5 # MeerKAT diameter in m
# Fitted values from recent work. Tsys/eta
specs = np.array([[ 881. ,24.87730995],
[ 913.875 ,24.49771763],
[ 946.75 ,24.16024859],
[ 979.625 ,23.64646727],
[1012.5 ,24.07896985],
[1045.375 ,23.79283849],
[1078.25 ,22.70843003],
[1111.125 ,22.93770384],
[1144. ,22.84885476],
[1176.875 ,22.12287765],
[1209.75 ,21.49206455],
[1242.625 ,21.16654511],
[1275.5 ,20.96656328],
[1308.375 ,20.6466135 ],
[1341.25 ,20.46467585],
[1374.125 ,20.35819618],
[1407. ,20.33486544],
[1439.875 ,20.45917325],
[1472.75 ,20.46422681],
[1505.625 ,20.53214192],
[1538.5 ,21.29373981],
[1571.375 ,20.78716734],
[1604.25 ,20.91109069],
[1637.125 ,21.14846713],
[1670. ,24.40091906]])
f = specs[:,0]*1e6 # frequency axis above, Hz
NPol = 2
A = np.pi * (diameter/2)**2
Tsys_eta = specs[:,1]
bw = h1/constants.c.value*velocity_width*1000.
tau = 3600 * hours
Tsys_per_eta = Tsys_eta[np.argmin(np.abs(f - obs_freq))]
locstring += 'T_sys/eta at {0:3.0f} MHz is {1:2.1f}K\n\n'.format(obs_freq/1e6,Tsys_per_eta)
rmsnat = 2 * constants.k_B.value * Tsys_per_eta / (A * np.sqrt(Nant * (Nant-1) * bw * tau)) * J / np.sqrt(NPol)
rmsbeam = f_tap*rmsnat
locstring += 'Using the radiometer formula:\n'
locstring += ' the natural rms noise for one pointing after {:.1f}h on-source is {:2.4g} mJy/beam over {:.2f} km/s\n'.format(hours, rmsnat*1e3, velocity_width)
locstring += ' the rms noise when applying a penalty of {:.4g} is {:2.4g} mJy/beam over {:.2f} km/s\n'.format(f_tap, rmsbeam*1e3, velocity_width)
print('{}'.format(locstring))
locstring += '\n'
outstring += locstring
return [rmsbeam, outstring]
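# Example: expected rms for a 10-hour track with all 64 MeerKAT antennas, at
# the default observing frequency (returns [rms in Jy/beam, report string]):
#     rms, report = getrms(hours=10., Nant=64)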
# Create mosaic using polygon
def returnradecindeg(ra, dec):
"""
Uses ra and dec as an input in a astropy-readable format and returns [ra, dec] in degrees.
"""
# Pointings can be given in any string understood by astropy or in decimals, in which case they would be deg
    try:
        wcscoords = coordinates.SkyCoord(ra, dec, frame='icrs')
    except Exception:
        try:
            wcscoords = coordinates.SkyCoord(ra, dec, frame='icrs', unit='deg')
        except ValueError:
            raise ValueError('Wrong format for pointings')
return [wcscoords.ra.deg, wcscoords.dec.deg]
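# Example: sexagesimal strings and decimal degrees give the same result.
#     returnradecindeg('10h00m00s', '-30d00m00s')   # -> [150.0, -30.0]
#     returnradecindeg(150.0, -30.0)                # -> [150.0, -30.0]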
def convertosides(polypointsx, polypointsy):
"""
Takes polypoints describing the corners of a polygon and returns list of quadruples describing sides of polygon
Input:
polypointsx (list of float): corners of the polygon in a consecutive order, any format, x
polypointsy (list of float): corners of the polygon in a consecutive order, any format, y
Output will be [[x1,x2,y1,y2], ...]
"""
sides = []
for i in range(len(polypointsx)-1):
sides.append([polypointsx[i], polypointsx[i+1], polypointsy[i], polypointsy[i+1]])
sides.append([polypointsx[len(polypointsx)-1], polypointsx[0], polypointsy[len(polypointsx)-1], polypointsy[0]])
return sides
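# Example: the corners of the unit square yield four sides.
#     convertosides([0., 1., 1., 0.], [0., 0., 1., 1.])
#     # -> [[0., 1., 0., 0.], [1., 1., 0., 1.], [1., 0., 1., 1.], [0., 0., 1., 0.]]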
def outpolygon(point, sidesofpolygon, border):
"""
Determines whether point in part of polygon described by sidesofpolygon
Input:
point (pair of float): x and y coordinate of polygon
sidesofpolygon (list of quadrupel of float): list of sides of polygon [[x1,x2,y1,y2], ...]
    border: Maximally allowed distance from the polygon; if positive the mosaic is
            shrunk to fit inside the polygon, if negative, it is expanded.
Return: True (point is outside of polygon) or False (point is inside of polygon)
"""
i = 0
for side in sidesofpolygon:
if point[0] < max(side[0:2]) and point[0] > min(side[0:2]):
if side[2]+(point[0]-side[0])*(side[3]-side[2])/(side[1]-side[0]) > point[1]:
i += 1
# point inside polygon
if i%2 > 0:
thareturn = False
# point outside polygon
else:
thareturn = True
if thareturn:
if border < 0:
for lseg in sidesofpolygon:
                if distpointlseg(point, lseg) < np.fabs(border):
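                    # The source is truncated here; presumably a point within
                    # |border| of an edge still counts as inside when expanding
                    # (the symmetric shrink branch for border > 0 is lost):
                    thareturn = False
                    break
    return thareturn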
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import axes
from scipy import stats
from scipy import interpolate
from scipy import linalg
def round_lims(values, round_factor=0.5):
"""
Identify rounded minimum and maximum based on appropriate power of 10
and round_factor.
round_place = 10 ** ceil( log10((max-min))-1 )
Minimum = (floor(min / round_place / round_factor)
* round_place * round_factor)
Maximum = (ceil(max / round_place / round_factor)
* round_place * round_factor)
E.g. [10, 39, 43] yields (10, 50) with round_factor = 1 (nearest 10)
[10, 39, 43] yields (0, 100) with round_factor = 10 (nearest 100)
         [10, 39, 43] yields (10, 45) with round_factor = 0.5 (nearest 5)
Args:
values (np.ndarray, list): vector of values of interest.
round_factor (float): multiplicative factor for rounding power
(Default = 0.5).
Returns:
        lims: tuple of (rounded minimum, rounded maximum)
        tick_factor: the rounding interval, i.e. ``round_place * round_factor``
"""
min_val = np.min(values)
max_val = np.max(values)
round_place = 10 ** np.ceil(np.log10(np.ptp([min_val, max_val])) - 1)
rounded_min = (np.floor(min_val / round_place / round_factor)
* round_place * round_factor)
rounded_max = (np.ceil(max_val / round_place / round_factor)
* round_place * round_factor)
lims = (rounded_min, rounded_max)
tick_factor = round_place * round_factor
return lims, tick_factor
def density_scatter(references,
predictions,
ax=None,
loglog=False,
lims=None,
lim_factor=0.5,
subset_threshold=1000,
cmap=None,
metrics=True,
text_size=10,
units=None,
labels=True,
label_size=10,
**scatter_kwargs):
"""
Plot regression performance with a scatter plot of predictions vs.
references, colored by log-density of points. Optionally display
mean-absolute error, root-mean-square error, minimum residual,
and maximum residual.
Args:
        references (list, np.ndarray): Vector of X-axis (reference) values.
        predictions (list, np.ndarray): Vector of Y-axis (predicted) values.
ax (axes.Axes): Optional handle for existing matplotlib axis object
loglog (bool): whether to plot on a log-log scale.
lims (tuple): lower and upper bounds for axis limits.
lim_factor (float): tuning factor for automatically determining limits.
subset_threshold (int): maximum number of points to plot.
If exceeded, subset will be selected randomly.
cmap (matplotlib.colors.LinearSegmentedColormap): color map.
metrics (bool): plot text with metrics e.g. root-mean-square-error.
text_size (int): fontsize for metrics text.
units (str): units for axis labels.
labels (bool): add axis labels.
label_size (int): fontsize for axis and tick labels.
**scatter_kwargs: keyword arguments for plt.scatter function.
Returns:
fig & ax: matplotlib figure and axis.
"""
if ax is None:
fig, ax = plt.subplots()
fig_tuple = (fig, ax)
else:
fig_tuple = (None, None)
if 's' not in scatter_kwargs.keys():
scatter_kwargs['s'] = 1 # default marker size
if cmap is None:
cmap = cm.viridis
x = np.array(references)
    y = np.array(predictions)
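    # NOTE: the remainder of this function is truncated in the source; below is
    # a minimal sketch of the documented behavior (random subsetting,
    # log-density coloring, parity line, optional metrics and labels).
    if subset_threshold is not None and x.size > subset_threshold:
        keep = np.random.choice(x.size, subset_threshold, replace=False)
        x = x[keep]
        y = y[keep]
    xy = np.vstack([x, y])
    z = stats.gaussian_kde(xy)(xy)  # point density at each sample
    order = np.argsort(z)  # draw the densest points last (on top)
    ax.scatter(x[order], y[order], c=np.log10(z[order]), cmap=cmap,
               **scatter_kwargs)
    if lims is None:
        lims, _ = round_lims(np.concatenate([x, y]), round_factor=lim_factor)
    ax.plot(lims, lims, 'k--', lw=0.5)  # parity (y = x) line
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    if loglog:
        ax.set_xscale('log')
        ax.set_yscale('log')
    if metrics:
        res = y - x
        txt = "MAE = {:.3g}\nRMSE = {:.3g}\nmin/max resid = {:.3g} / {:.3g}".format(
            np.mean(np.abs(res)), np.sqrt(np.mean(res ** 2)),
            np.min(res), np.max(res))
        ax.text(0.02, 0.98, txt, transform=ax.transAxes, fontsize=text_size,
                verticalalignment='top')
    if labels:
        unit_suffix = " ({})".format(units) if units is not None else ""
        ax.set_xlabel("Reference" + unit_suffix, fontsize=label_size)
        ax.set_ylabel("Prediction" + unit_suffix, fontsize=label_size)
        ax.tick_params(labelsize=label_size)
    return fig_tuple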
import itertools
import numpy as np
import torch
from torch import nn
import pandas as pd
from itertools import permutations
from tools import loss_func_tools
from tools import data_generation_tools
from tools import spo_framework
from tools import prediction_tools
from tools import optimization_oracle_tools
from tools import optim_tools
def portfolio_model_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False,
data_gen_model='portfolio'):
n_features = model_params['n_features']
n_samples = model_params['n_samples']
dim_cost = model_params['dim_cost']
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map = {
'spop': loss_func_tools.spop_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
pred_model_map = {
'linear': prediction_tools.linear_prediction_model,
'two_layers': prediction_tools.two_layers_model,
}
pred_model_back_map = {
'linear': prediction_tools.linear_prediction_model_back,
'two_layers': prediction_tools.two_layers_model_back,
}
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
}
optimization_params = {'r': np.log(dim_cost) - np.log(dim_cost - 0.9)}
# optimization_params = {'r': np.log(dim_cost) / 2}
baseline_action = torch.ones(dim_cost) / dim_cost
test_results = pd.DataFrame(columns=data_param_name + [
'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline'])
def _clone_params(num_params):
num_params_copy = {}
for num_param in num_params:
num_params_copy[num_param] = num_params[num_param].detach().clone()
return num_params_copy
for param_value in itertools.product(*data_param_value, range(n_trails)):
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value):
param[name] = value
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param)
actions_hindsight, _ = optimization_oracle_tools.entropy_oracle(y_test, optimization_params, False)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param,
model_coef=model_coef)
for pred_model in pred_model_list:
if pred_model == 'linear':
initial_params = {
'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')),
'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32'))
}
elif pred_model == 'two_layers':
hidden_dim = model_params.get('hidden_dim', 256)
initial_params = {
'W1': torch.from_numpy(
(np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')),
'W2': torch.from_numpy(
(np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')),
'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')),
'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')),
}
else:
raise Exception(
'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model)
for j, loss_func in enumerate(loss_list):
if pred_model == 'two_layers' and loss_func == 'l2':
lr = 0.01
elif pred_model == 'linear':
if loss_func == 'spo':
lr = 1.
else:
lr = 0.1
else:
lr = 1.
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': pred_model_map[pred_model],
'model_params': _clone_params(initial_params),
'predict_model_back': pred_model_back_map[pred_model],
'optimization_oracle': optimization_oracle_tools.entropy_oracle,
'optimization_params': optimization_params,
'optimization_oracle_back': optimization_oracle_tools.entropy_oracle_back,
'loss_func': loss_map[loss_func],
'optimizer': optim_tools.adam,
# 'optimizer': optim_tools.sgd_momentum,
# Notes:
                    # SPO, two layers: lr = 1.0
# 'optimizer_config': {'learning_rate': lr, 'momentum': 0.9, 'lr_decay': 0.995},
'require_grad': True,
})
loss = spo_model.update(
x_input, y_input, num_iter=20000, if_quiet=True,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight},
if_test_ini=if_test_ini and (j == 0),
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo,
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo,
]
return test_results
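# Example driver (illustrative values; the `data_params` keys must match what
# the chosen generator in `tools.data_generation_tools` expects):
#     results = portfolio_model_test(
#         model_params={'n_features': 5, 'n_samples': 100, 'dim_cost': 10},
#         data_params={'deg': [1, 4], 'tau': [1.0], 'n_factors': [4]},
#         test_params={'test_size': 1000, 'n_trails': 5},
#         loss_list=['spop', 'l2'], pred_model_list=['linear'])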
def portfolio_model_excess_risk_test(model_params, data_params, test_params, loss_list, pred_model_list,
if_test_ini=False, data_gen_model='portfolio'):
n_features = model_params['n_features']
n_samples = model_params['n_samples']
dim_cost = model_params['dim_cost']
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map = {
'spop': loss_func_tools.spop_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
pred_model_map = {
'linear': prediction_tools.linear_prediction_model,
'two_layers': prediction_tools.two_layers_model,
}
pred_model_back_map = {
'linear': prediction_tools.linear_prediction_model_back,
'two_layers': prediction_tools.two_layers_model_back,
}
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
}
optimization_params = {'r': np.log(dim_cost) - np.log(dim_cost - 0.9)}
# optimization_params = {'r': np.log(dim_cost) / 2}
baseline_action = torch.ones(dim_cost) / dim_cost
test_results = pd.DataFrame(columns=data_param_name + [
'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'normal_mean_spo_loss'])
def _clone_params(num_params):
num_params_copy = {}
for num_param in num_params:
num_params_copy[num_param] = num_params[num_param].detach().clone()
return num_params_copy
for param_value in itertools.product(*data_param_value, range(n_trails)):
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value):
param[name] = value
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param)
actions_hindsight, _ = optimization_oracle_tools.entropy_oracle(y_test, optimization_params, False)
y_mean = model_coef['c_mean'].detach().clone()
        action_y_mean, _ = optimization_oracle_tools.entropy_oracle(y_mean, optimization_params, False)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param,
model_coef=model_coef)
flag_mean_spo_loss = True
for pred_model in pred_model_list:
if pred_model == 'linear':
initial_params = {
'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')),
'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32'))
}
elif pred_model == 'two_layers':
hidden_dim = model_params.get('hidden_dim', 256)
initial_params = {
'W1': torch.from_numpy(
(np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')),
'W2': torch.from_numpy(
(np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')),
'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')),
'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')),
}
else:
raise Exception(
'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model)
for j, loss_func in enumerate(loss_list):
if pred_model == 'two_layers' and loss_func == 'l2':
lr = 0.01
elif pred_model == 'linear':
if loss_func == 'spo':
lr = 1.
else:
lr = 0.1
else:
lr = 1.
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': pred_model_map[pred_model],
'model_params': _clone_params(initial_params),
'predict_model_back': pred_model_back_map[pred_model],
'optimization_oracle': optimization_oracle_tools.entropy_oracle,
'optimization_params': optimization_params,
'optimization_oracle_back': optimization_oracle_tools.entropy_oracle_back,
'loss_func': loss_map[loss_func],
'optimizer': optim_tools.adam,
# 'optimizer': optim_tools.sgd_momentum,
# Notes:
                    # SPO, two layers: lr = 1.0
# 'optimizer_config': {'learning_rate': lr, 'momentum': 0.9, 'lr_decay': 0.995},
'require_grad': True,
})
loss = spo_model.update(
x_input, y_input, num_iter=20000, if_quiet=True,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight,
                              'cost_mean': y_mean, 'action_cost_mean': action_y_mean, },
if_test_ini=if_test_ini and (j == 0), if_mean_spo_loss=flag_mean_spo_loss,
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if flag_mean_spo_loss:
loss_mean = loss['loss_mean']
normal_spo_loss_mean = loss_mean / hindsight
flag_mean_spo_loss = False
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo, normal_spo_loss_mean,
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo, normal_spo_loss_mean,
]
return test_results
def portfolio_argmax_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False,
data_gen_model='portfolio'):
n_features = model_params['n_features']
n_samples = model_params['n_samples']
dim_cost = model_params['dim_cost']
hidden_dim = model_params.get('hidden_dim', 128)
minmax = model_params.get('min/max', 'max')
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map = {
'spop': loss_func_tools.spop_argmax_loss_func,
# 'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
def pred_model_map(pred_model):
if pred_model == 'linear':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=dim_cost),
)
elif pred_model == 'two_layers':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=hidden_dim),
nn.ReLU(),
nn.Linear(in_features=hidden_dim, out_features=dim_cost),
)
else:
raise Exception('Prediction Model Type Error!')
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
}
baseline_action = torch.ones(dim_cost) / dim_cost
optimization_params = {'const': None}
test_results = pd.DataFrame(columns=data_param_name + [
'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline'])
for param_value in itertools.product(*data_param_value, range(n_trails)):
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value):
param[name] = value
################################
# Something new here about neg #
################################
neg = minmax == 'max'
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param, neg=neg)
actions_hindsight, _ = optimization_oracle_tools.softmax_oracle(y_test, optimization_params, False)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param,
model_coef=model_coef, neg=neg)
for pred_model in pred_model_list:
for j, loss_func in enumerate(loss_list):
predict_model = pred_model_map(pred_model)
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': predict_model,
'optimization_oracle': optimization_oracle_tools.softmax_oracle,
'optimization_params': optimization_params,
'loss_func': loss_map[loss_func],
'optimizer': torch.optim.Adam(predict_model.parameters()),
'require_grad': False,
'minibatch_size': 64,
'if_argmax': True,
})
loss = spo_model.update(
x_input, y_input, num_iter=10000, if_quiet=True,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight},
if_test_ini=if_test_ini and (j == 0),
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
print(loss_func, pred_model, loss_test, hindsight)
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo,
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo,
]
return test_results
def barrier_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False,
data_gen_model='portfolio'):
n_features = model_params['n_features']
n_samples = model_params['n_samples']
dim_cost = model_params['dim_cost']
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map = {
'spop': loss_func_tools.spop_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
pred_model_map = {
'linear': prediction_tools.linear_prediction_model,
'two_layers': prediction_tools.two_layers_model,
}
pred_model_back_map = {
'linear': prediction_tools.linear_prediction_model_back,
'two_layers': prediction_tools.two_layers_model_back,
}
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
'multi_class': data_generation_tools.multi_class_data,
}
optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)}
# optimization_params = {'r': np.log(dim_cost) / 2}
baseline_action = torch.ones(dim_cost, dtype=torch.float64) / dim_cost
test_results = pd.DataFrame(columns=data_param_name + [
'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline'])
def _clone_params(num_params):
num_params_copy = {}
for num_param in num_params:
num_params_copy[num_param] = num_params[num_param].detach().clone()
return num_params_copy
for param_value in itertools.product(*data_param_value, range(n_trails)):
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value):
param[name] = value
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param, neg=False)
actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False)
argmin_hindsight = y_test.argmin(dim=1, keepdim=True)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=False)
for pred_model in pred_model_list:
if pred_model == 'linear':
initial_params = {
'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float64')),
'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float64'))
}
elif pred_model == 'two_layers':
hidden_dim = model_params.get('hidden_dim', 256)
initial_params = {
'W1': torch.from_numpy(
(np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float64')),
'W2': torch.from_numpy(
(np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float64')),
'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float64')),
'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float64')),
}
else:
raise Exception(
'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model)
for j, loss_func in enumerate(loss_list):
if pred_model == 'two_layers' and loss_func == 'l2':
lr = 0.01
elif pred_model == 'linear':
if loss_func == 'spo':
lr = 1.
else:
lr = 0.1
else:
lr = 1.
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': pred_model_map[pred_model],
'model_params': _clone_params(initial_params),
'predict_model_back': pred_model_back_map[pred_model],
'optimization_oracle': optimization_oracle_tools.barrier_oracle,
'optimization_params': optimization_params,
'test_optimization_oracle': optimization_oracle_tools.argmin_test,
'test_optimization_params': {'arg': 'min'},
'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back,
'loss_func': loss_map[loss_func],
'optimizer': optim_tools.adam,
# 'optimizer': optim_tools.sgd_momentum,
# Notes:
                    # SPO, two layers: lr = 1.0
'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.99},
'require_grad': True,
'if_argmax': True,
'minibatch_size': 8,
})
loss = spo_model.update(
x_input, y_input, num_iter=3000, if_quiet=False,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight,
'argmin_hindsight': argmin_hindsight,
},
if_test_ini=if_test_ini and (j == 0),
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
print(loss_func, pred_model, 'test spo loss:', loss_test, 'best cost in hindsight', hindsight)
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo,
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo,
]
return test_results
def shortest_path_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False,
data_gen_model='shortest_path'):
n_features = model_params['n_features']
n_samples = model_params['n_samples']
dim_cost = model_params['dim_cost']
hidden_dim = model_params.get('hidden_dim', 128)
grid_dim = model_params.get('grid_dim', 4)
    assert dim_cost == 2 * grid_dim * (grid_dim - 1), 'cost dim does not match grid dim!'
min_max = model_params.get('min_max', 'min')
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map = {
'spop': loss_func_tools.spop_loss_func,
# 'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
def pred_model_map(_pred_model):
if _pred_model == 'linear':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=dim_cost),
)
elif _pred_model == 'two_layers':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=hidden_dim),
nn.ReLU(),
nn.Linear(in_features=hidden_dim, out_features=dim_cost),
)
else:
raise Exception('Prediction Model Type Error!')
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
}
baseline_action = torch.zeros(dim_cost)
# optimization_params = {'const': None}
test_results = pd.DataFrame(columns=data_param_name + [
'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline'])
def _path_decoding(_grid_dim, path_encoded):
loc_x, loc_y = 0, 0
num_edges = _grid_dim * (_grid_dim - 1)
path_decoded = np.zeros(2 * num_edges)
for direction in path_encoded:
if direction:
path_decoded[1 * loc_x + (_grid_dim - 1) * loc_y + num_edges] = 1
loc_x += 1
else:
path_decoded[(_grid_dim - 1) * loc_x + 1 * loc_y] = 1
loc_y += 1
return path_decoded
def _construct_grid_path(_grid_dim):
assert _grid_dim >= 2, 'Grid dim at least 2!'
path_0 = [0] * (_grid_dim - 1) + [1] * (_grid_dim - 1)
paths_encoded = list(set(permutations(path_0)))
paths = []
for path_encoded in paths_encoded:
paths.append(_path_decoding(_grid_dim, path_encoded))
paths = np.array(paths, dtype='float32')
return torch.from_numpy(paths)
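    # Example: on a 2x2 grid there are exactly two monotone paths, each using
    # one "down" and one "right" edge, so _construct_grid_path(2) returns a
    # (2, 4) tensor of edge-indicator vectors.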
optimization_params = {
'paths': _construct_grid_path(grid_dim),
'min_max': min_max,
}
for param_value in itertools.product(*data_param_value, range(n_trails)):
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value):
param[name] = value
################################
# Something new here about neg #
################################
neg = min_max == 'max'
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param, neg=neg)
actions_hindsight, _ = optimization_oracle_tools.shortest_path_oracle(y_test, optimization_params, False)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param,
model_coef=model_coef, neg=neg)
for pred_model in pred_model_list:
print(pred_model)
for j, loss_func in enumerate(loss_list):
predict_model = pred_model_map(pred_model)
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': predict_model,
'optimization_oracle': optimization_oracle_tools.shortest_path_oracle,
'optimization_params': optimization_params,
'loss_func': loss_map[loss_func],
'optimizer': torch.optim.Adam(predict_model.parameters()),
'require_grad': False,
'minibatch_size': 64,
'if_argmax': True,
})
loss = spo_model.update(
x_input, y_input, num_iter=10000, if_quiet=True,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight},
if_test_ini=if_test_ini and (j == 0),
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
print(loss_func, pred_model, 'test spo loss:', loss_test, 'best cost in hindsight', hindsight)
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo,
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo,
]
return test_results
def barrier_vs_argmin_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False,
data_gen_model='portfolio'):
n_features = model_params['n_features']
n_samples_list = model_params['n_samples']
dim_cost = model_params['dim_cost']
neg = model_params.get('neg', False)
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map_barrier = {
'spop': loss_func_tools.spop_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
loss_map_argmin = {
'spop': loss_func_tools.spop_argmax_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
pred_model_map = {
'linear': prediction_tools.linear_prediction_model,
'two_layers': prediction_tools.two_layers_model,
}
pred_model_back_map = {
'linear': prediction_tools.linear_prediction_model_back,
'two_layers': prediction_tools.two_layers_model_back,
}
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
'multi_class': data_generation_tools.multi_class_data,
}
optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)}
# optimization_params = {'r': np.log(dim_cost) / 2}
baseline_action = torch.ones(dim_cost) / dim_cost
test_results = pd.DataFrame(columns=data_param_name + [
'n_samples', 'i', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'type'])
def _clone_params(num_params):
num_params_copy = {}
for num_param in num_params:
num_params_copy[num_param] = num_params[num_param].detach().clone()
return num_params_copy
def _pred_model_map(_pred_model):
if _pred_model == 'linear':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=dim_cost),
)
elif _pred_model == 'two_layers':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=hidden_dim),
nn.ReLU(),
nn.Linear(in_features=hidden_dim, out_features=dim_cost),
)
else:
raise Exception('Prediction Model Type Error!')
for param_value_tuple in itertools.product(*data_param_value, n_samples_list, range(n_trails)):
param_value = list(param_value_tuple)
n_samples = param_value[-2]
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value[:-2]):
param[name] = value
print(param, param_value)
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param, neg=neg)
actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False)
argmin_hindsight = y_test.argmin(dim=1, keepdim=True)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg)
for pred_model in pred_model_list:
if pred_model == 'linear':
initial_params = {
'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')),
'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32'))
}
elif pred_model == 'two_layers':
hidden_dim = model_params.get('hidden_dim', 256)
initial_params = {
'W1': torch.from_numpy(
(np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')),
'W2': torch.from_numpy(
(np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')),
'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')),
'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')),
}
else:
raise Exception(
'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model)
for j, loss_func in enumerate(loss_list):
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': pred_model_map[pred_model],
'model_params': _clone_params(initial_params),
'predict_model_back': pred_model_back_map[pred_model],
'optimization_oracle': optimization_oracle_tools.barrier_oracle,
'optimization_params': optimization_params,
'test_optimization_oracle': optimization_oracle_tools.argmin_test,
'test_optimization_params': {'arg': 'min'},
'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back,
'loss_func': loss_map_barrier[loss_func],
'optimizer': optim_tools.adam,
# 'optimizer': optim_tools.sgd_momentum,
# Notes:
                    # SPO, two layers: lr = 1.0
'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.999},
'require_grad': True,
'if_argmax': True,
})
loss = spo_model.update(
x_input, y_input, num_iter=5000, if_quiet=True,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight,
'argmin_hindsight': argmin_hindsight,
},
if_test_ini=if_test_ini and (j == 0),
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
print(loss_func, pred_model, loss_test, hindsight, 'barrier')
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo, 'barrier',
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo, 'barrier',
]
print('argmin start.')
predict_model = _pred_model_map(pred_model)
spo_model = spo_framework.SpoTest({
'n_features': n_features,
'dim_cost': dim_cost,
'baseline_action': baseline_action,
'predict_model': predict_model,
'optimization_oracle': optimization_oracle_tools.softmax_oracle,
'optimization_params': {'const': None},
'loss_func': loss_map_argmin[loss_func],
'optimizer': torch.optim.Adam(predict_model.parameters()),
'require_grad': False,
'minibatch_size': min(64, n_samples),
'if_argmax': True,
})
loss = spo_model.update(
x_input, y_input, num_iter=10000, if_quiet=True,
test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': argmin_hindsight},
if_test_ini=if_test_ini and (j == 0),
)
loss_test = loss['loss_spo_test']
hindsight = loss['hindsight']
print(loss_func, pred_model, loss_test, hindsight, 'argmin')
normal_spo = loss_test / hindsight
train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight
if loss['loss_spo_baseline'] is not None:
baseline_spo = loss['loss_spo_baseline'] / hindsight
else:
baseline_spo = None
if if_test_ini:
if j == 0:
loss_ini = loss['loss_spo_test_ini']
hind_ini = loss['hindsight_ini']
spo_ini = loss_ini / hind_ini
test_results.loc[len(test_results.index)] = list(param_value) + [
loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini,
baseline_spo, 'argmin',
]
else:
test_results.loc[len(test_results.index)] = list(param_value) + [
loss_func, pred_model, normal_spo, hindsight, train_spo, None,
baseline_spo, 'argmin',
]
return test_results
def barrier_vs_argmin_excess_risk_test(model_params, data_params, test_params, loss_list, pred_model_list,
if_test_ini=False, data_gen_model='portfolio'):
n_features = model_params['n_features']
n_samples_list = model_params['n_samples']
dim_cost = model_params['dim_cost']
neg = model_params.get('neg', False)
# deg_list = data_params['deg']
# tau_list = data_params['tau']
# n_factors_list = data_params['n_factors']
data_param_name, data_param_value = [], []
for param_name in data_params:
data_param_name.append(param_name)
data_param_value.append(data_params[param_name])
test_set_size = test_params['test_size']
n_trails = test_params['n_trails']
loss_map_barrier = {
'spop': loss_func_tools.spop_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
loss_map_argmin = {
'spop': loss_func_tools.spop_argmax_loss_func,
'spo': loss_func_tools.spo_loss_func,
'l2': loss_func_tools.mse_loss_func,
'l1': loss_func_tools.abs_loss_func,
}
pred_model_map = {
'linear': prediction_tools.linear_prediction_model,
'two_layers': prediction_tools.two_layers_model,
}
pred_model_back_map = {
'linear': prediction_tools.linear_prediction_model_back,
'two_layers': prediction_tools.two_layers_model_back,
}
data_gen_map = {
'portfolio': data_generation_tools.portfolio_data,
'shortest_path': data_generation_tools.shortest_path_data,
'multi_class': data_generation_tools.multi_class_data,
}
optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)}
# optimization_params = {'r': np.log(dim_cost) / 2}
baseline_action = torch.ones(dim_cost) / dim_cost
test_results = pd.DataFrame(columns=data_param_name + [
'n_samples', 'i', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight',
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'normal_mean_spo_loss', 'type'])
def _clone_params(num_params):
num_params_copy = {}
for num_param in num_params:
num_params_copy[num_param] = num_params[num_param].detach().clone()
return num_params_copy
def _pred_model_map(_pred_model):
if _pred_model == 'linear':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=dim_cost),
)
elif _pred_model == 'two_layers':
return nn.Sequential(
nn.Linear(in_features=n_features, out_features=hidden_dim),
nn.ReLU(),
nn.Linear(in_features=hidden_dim, out_features=dim_cost),
)
else:
raise Exception('Prediction Model Type Error!')
for param_value_tuple in itertools.product(*data_param_value, n_samples_list, range(n_trails)):
param_value = list(param_value_tuple)
n_samples = param_value[-2]
if param_value[-1] == 0:
print(param_value)
param = {}
for name, value in zip(data_param_name, param_value[:-2]):
param[name] = value
print(param, param_value)
x_test, y_test, model_coef = data_gen_map[data_gen_model](
n_features, test_set_size, dim_cost, param, neg=neg)
actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False)
argmin_hindsight = y_test.argmin(dim=1, keepdim=True)
x_input, y_input, _ = data_gen_map[data_gen_model](
n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg)
y_mean = model_coef['c_mean'].detach().clone()
action_y_mean, _ = optimization_oracle_tools.barrier_oracle(y_mean, optimization_params, False)
argmin_hindsight_ymean = y_mean.argmin(dim=1, keepdim=True)
flag_mean_spo_loss = True
for pred_model in pred_model_list:
if pred_model == 'linear':
initial_params = {
'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')),
                    'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32'))
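                }
            # NOTE: the source is truncated here; the remainder of this function
            # presumably mirrors `barrier_vs_argmin_test` above, with the extra
            # 'normal_mean_spo_loss' column filled in as in
            # `portfolio_model_excess_risk_test`.
    return test_results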
"""
Metabolite annotation
"""
import numpy as np
from medipy.io import load
import os
import fonc_util as f
import gen_in
import lawij
import ser_xml
import xml_gen
import locale
def ann(root,wa7da):
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, "C")
tabp,tabc=gen_in.ini1(root)
boc=-1
lolo= load(os.path.join(root,"2rr"))
H=lolo.data
D=H[0,:,:]
H=D
#print corps.interquartile_range(H)
#print np.size(H,0),np.size(H,1)
list_exp=ser_xml.exceptions(root)
raxep=[]
while boc<len(tabp)-1:
boc+=1
a=str(tabp[boc][2])
#print a,boc
newn=''
for j in range(len(a)):
if (a[j]=='_')==1:
break
newn+=a[j]
r1=[]
r1.append((boc))
for jj in range(boc+1,len(tabp)):
nomn=str(tabp[jj][2])
#print nomn[0:j]
try:
if nomn[0:j+1]==newn+'_':
r1.append((jj))
#print 'ok'
else:
break
except:
break
boc=boc+len(r1)-1
#print tabp[r1[0]:r1[len(r1)-1]+1]
#raw_input()
#print len(r1)
nt=tabp[r1[0]:r1[len(r1)-1]+1]
#print nt
#print 'Start'
test=[]
testc3=[]
con=0
ampref=[]
amp=[]
newrt=[]
#print newn
ham=0
ed5il=0
for jj in range(len(nt)):
#print newn
#print jj
#print nt[jj][0],nt[jj][1]
#print nt[jj][0],nt[jj][1]
r,indi=lawij.alig(nt[jj][0],nt[jj][1],H,nt[jj][3],wa7da)
#print r,indi,nt[jj][2],nt[jj][0]-r[0],nt[jj][1]-r[1],nt[jj][7]
if nt[jj][7]=='n':
ed5il=1
#raw_input()
if r[0]==100 and nt[jj][7]=='y':
#print r,indi,nt[jj][2],nt[jj][0]-r[0],nt[jj][1]-r[1],nt[jj][7]
ham=1
break
if indi==0 :
con=con+1
if np.abs(r[0])==4 or np.abs(r[1])==6:
testc3.append((1))
else:
testc3.append((0))
test.append((0))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
#print congl
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#print ch
#plt.show()
#print nt[jj][3]
ampref.append(float(nt[jj][3])*1.)
amp.append( H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
#print str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])])
newrt.append((nr[0],nr[1],nt[jj][2],str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]),float(nt[jj][3]),chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
#print newrt
else:
if r[0]<100 :
if np.abs(r[0])==3 or np.abs(r[1])==5:
testc3.append((1))
else:
testc3.append((0))
con=con+1
test.append((1))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
ampref.append(float(nt[jj][3])*1.)
amp.append(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
newrt.append((nr[0],nr[1],nt[jj][2],H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.,float(nt[jj][3]),chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
else:
test.append((2))
testc3.append((2))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[nt[jj][0]-7:(nt[jj][0])+8,(nt[jj][1])-7:(nt[jj][1])+8]*1.
ampref.append(float(nt[jj][3])*1.)
amp.append(0)
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#raw_input()
newrt.append((0,0,0,0,0,0,0,0,0,0,0,0,0))
#raw_input()
#print newn
#print ampref
#print amp,newn,testc3
#print nt
#print newrt
#raw_input()
#amptest=np.nonzero(amp>0)
        o=np.nonzero(np.array(testc3)==0)
vamp=np.array(amp)
ivamref=vamp[o]
o=np.nonzero(ivamref>210000)
#print newn,'test'
#print 'ham',ham
#if (float(len(o[0]))*1.000000001)/float(len(ivamref)*1.00000001)>0.4 or f.exp_hand(list_exp,newn)==1 or ham==0:
if ham==0:
#print 'd5aal'
if len(nt)==con:
if len(nt)==1:
raxep.append(newrt[0])
#print 'accepted'
else:
artestc3=np.array(testc3)
olc3=np.nonzero(artestc3==1)
if np.size(olc3)>0:
if f.exp_ync(amp,ampref,testc3)==1:
#print 'ouffffff'
artest=np.array(test)
ol=np.nonzero(artest==1)
o=np.nonzero(artest==0)
if np.size(ol)>0:
#print 'accepted with some conditions'
#print f.exp_ync(amp,ampref,testc3)
#if f.exp_ync(amp,ampref,testc3)==0:
#print 'llllllaaaaaaaaa'
if f.exp_yn(amp,ampref,test)==1:
for no in range(len(newrt)):
raxep.append(newrt[no])
elif f.exp_hand(list_exp,newn)==1:
artest=np.array(test)
rnt=np.array(newrt)
ol=np.nonzero(artest==1)
o=np.nonzero(artest==0)
vo=rnt[o]
#raw_input()
for no in range(len(vo)):
#print '%%%%%%%%%'
zi=lawij.mod(vo[no])
#print zi
#print artest
#raw_input()
raxep.append((zi))
else:
#print f.exp_ync(amp,ampref,testc3)
for no in range(len(nt)):
raxep.append(newrt[no])
#print 'accepted'
else:
#print 'ouuuuut'
#print f.exp_hand(list_exp,newn)
if f.exp_hand(list_exp,newn)==1 :
artest=np.array(test)
artestc3=np.array(testc3)
rnt=np.array(newrt)
#print nt
#raw_input()
ol=np.nonzero(artest==1)
condlist=[artest==0,artestc3==0]
g=condlist[0]*condlist[1]
#print g,artest,artestc3
o=np.nonzero(artest==0)
#print '%%%hhh%%%%%%'
#print rnt
#print test
vo=rnt[g]
#print vo,'%%%hhh%%%%%%'
#raw_input()
for no in range(len(vo)):
#print '%%%%%%%%%'
zi=lawij.mod(vo[no])
#print zi
#print artest
#raw_input()
raxep.append((zi))
else:
#print f.exp_ync(amp,ampref,testc3)
for no in range(len(nt)):
raxep.append(newrt[no])
#print 'accepted'
else:
if ham==0:
if f.exp_hand(list_exp,newn)==1 or ed5il==1:
artest=np.array(test)
#print newrt
rnt=xml_gen.cell2tab8(newrt)
#print nt
#raw_input()
ol=np.nonzero(artest==1)
o=np.nonzero(artest==0)
#print '%%%hhh%%%%%%'
rnt=np.array(rnt)
#print test
vo=rnt[o]
#print vo
#print len(vo)
#raw_input()
for no in range(len(vo)):
#print '%%%%%%%%%'
zi=lawij.mod(vo[no])
#print zi
#print artest
#raw_input()
raxep.append((zi))
#print 'may be...'
#else:
#print 'refused without discussion'
#print test
#print 'DONE'
#print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
#print ' '
#raw_input()
locale.setlocale(locale.LC_ALL, old_locale)
return raxep,tabc
#for lll in range(len(raxep)):
#print raxep[lll]
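# Example (illustrative): `root` is a Bruker pdata directory containing the
# processed '2rr' spectrum; `wa7da` is passed through to `lawij.alig` as an
# alignment parameter.
#     peaks, tabc = ann('/path/to/experiment/pdata/1', wa7da)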
def rec(root):
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, "C")
#root='/home/miv/belghith/Bureau/KAROM/Akram/nmr/RP/5/pdata/1'
tabp,tabc=gen_in.ini2(root)
boc=-1
lolo= load(os.path.join(root,"2rr"))
H=lolo.data
D=H[0,:,:]
H=D
#print corps.interquartile_range(H)
#print np.size(H,0),np.size(H,1)
list_exp=ser_xml.exceptions(root)
raxep=[]
while boc<len(tabp)-1:
boc+=1
a=str(tabp[boc][2])
#print a,boc
newn=''
for j in range(len(a)):
if (a[j]=='_')==1:
break
newn+=a[j]
r1=[]
r1.append((boc))
for jj in range(boc+1,len(tabp)):
nomn=str(tabp[jj][2])
#print nomn[0:j]
try:
if nomn[0:j+1]==newn+'_':
r1.append((jj))
#print 'ok'
else:
break
except:
break
boc=boc+len(r1)-1
#print tabp[r1[0]:r1[len(r1)-1]+1]
#raw_input()
#print len(r1)
nt=tabp[r1[0]:r1[len(r1)-1]+1]
#print nt
#print 'Start'
test=[]
testc3=[]
con=0
ampref=[]
amp=[]
newrt=[]
#print newn
for jj in range(len(nt)):
#print newn
#print jj
#print nt[jj][0],nt[jj][1]
#print nt[jj][0],nt[jj][1]
r,indi=lawij.aligrec(nt[jj][0],nt[jj][1],H,nt[jj][3])
#print r,indi,nt[jj][2],nt[jj][0]-r[0],nt[jj][1]-r[1]
#raw_input()
if indi==0 :
con=con+1
#print 'ok'
testc3.append((0))
test.append((0))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
#print congl
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#print ch
#plt.show()
ampref.append(float(nt[jj][3])*1.)
amp.append( H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
#print str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])])
newrt.append((nr[0],nr[1],nt[jj][2],str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]),chg[0],chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
#print newrt
else:
if r[0]<100 :
if np.abs(r[0])==3 or np.abs(r[1])==5:
testc3.append((1))
else:
testc3.append((0))
con=con+1
test.append((1))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
ampref.append(float(nt[jj][3])*1.)
amp.append(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
newrt.append((nr[0],nr[1],nt[jj][2],H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.,chg[0],chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
else:
test.append((2))
testc3.append((2))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[nt[jj][0]-7:(nt[jj][0])+8,(nt[jj][1])-7:(nt[jj][1])+8]*1.
ampref.append(float(nt[jj][3])*1.)
amp.append(0)
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#raw_input()
newrt.append((0,0,0,0,0,0,0,0,0,0,0,0,0))
#raw_input()
#print newn
#print ampref
#print amp,newn,testc3
#print nt
#print newrt
#raw_input()
#amptest=np.nonzero(amp>0)
        o=np.nonzero(np.array(testc3)==0)
vamp=np.array(amp)
ivamref=vamp[o]
o=np.nonzero(ivamref>100)
#print newn,'test'
if (float(len(o[0]))*1.000000001)/float(len(ivamref)*1.00000001)>0.4 or f.exp_hand(list_exp,newn)==1:
#print newn,'d5aal'
if len(nt)==con:
if len(nt)==1:
raxep.append(newrt[0])
#print 'accepted'
else:
artestc3=np.array(testc3)
olc3=np.nonzero(artestc3==1)
if np.size(olc3)>0:
if f.exp_ync(amp,ampref,testc3)==1:
#print 'ouffffff'
                            artest=np.array(test)
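                            # NOTE: the source is truncated here; the remainder
                            # of `rec` presumably mirrors the corresponding
                            # acceptance logic in `ann` above.
    locale.setlocale(locale.LC_ALL, old_locale)
    return raxep, tabc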
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
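# NOTE: bracketed comments like the one above are MATLAB-style 1-based,
# inclusive channel ranges; the np.arange calls below are their 0-based,
# half-open Python equivalents (e.g. 1:14 -> np.arange(0, 14)).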
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(20, 25), np.arange(26, 37), np.arange(42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
np.arange(22, 23), np.arange(29, 37), np.arange(38, 40),
np.arange(42, 64), np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
np.arange(36, 37), np.arange(38, 40), np.arange(42, 74),
np.arange(75, 81), np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12', 'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30', 'RG31',
'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
np.arange(42, 66), np.arange(68, 74), np.arange(76, 77),
np.arange(78, 94), np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
np.arange(11, 17), np.arange(18, 19), np.arange(20, 37),
np.arange(40, 42), np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
np.arange(31, 36), np.arange(40, 42), np.arange(44, 47),
np.arange(48, 66), np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8), np.arange(10, 13),
np.arange(16, 19), np.arange(21, 26), np.arange(31, 32),
np.arange(33, 35), np.arange(36, 37), np.arange(41, 42),
np.arange(49, 55), np.arange(57, 58), np.arange(61, 65),
np.arange(69, 72), np.arange(76, 81), np.arange(83, 97),
np.arange(99, 102), np.arange(104, 107), np.arange(109, 114),
np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16), np.arange(21, 28),
np.arange(33, 36), np.arange(39, 40), np.arange(42, 44), np.arange(46, 50),
np.arange(56, 58), np.arange(62, 65), np.arange(66, 68), np.arange(69, 75),
np.arange(76, 83), np.arange(85, 89), np.arange(96, 103),
np.arange(106, 109), np.arange(111, 115), np.arange(116, 117),
np.arange(119, 123), np.arange(126, 127), np.arange(130, 134),
np.arange(136, 137), np.arange(138, 144), np.arange(146, 153)))
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19), np.arange(20, 33),
np.arange(34, 37), np.arange(38, 40), np.arange(42, 98),
np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7), np.arange(8, 9),
np.arange(10, 12), np.arange(14, 18), np.arange(20, 28),
np.arange(29, 34), np.arange(46, 47), np.arange(49, 62),
np.arange(63, 67), np.arange(69, 73), np.arange(78, 87),
np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
np.arange(35, 68), np.arange(76, 163)))
onsetelecs = ["L7"]
clinresult = 2
elif pat_id == 'la04':
# [1:4 9:13 15:17 22 24:32 44:47 52:58 60 63:64 ...
# 67:70 72:74 77:84 88:91 94:96 98:101 109:111 114:116 121 123:129];
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 13),
np.arange(14, 17), np.arange(21, 22), np.arange(23, 32),
np.arange(43, 47), np.arange(51, 58), np.arange(59, 60),
np.arange(62, 64), np.arange(66, 70), np.arange(71, 74),
np.arange(76, 84), np.arange(87, 91), np.arange(93, 96),
np.arange(97, 101), np.arange(108, 111), np.arange(113, 116),
np.arange(120, 121), np.arange(122, 129)))
# FIRST ABLATION WAS A FAILURE
onsetelecs = ["L'4", "G'1", # 2ND RESECTION REMOVED ALL OF M' ELECTRODES
"M'1", "M'2", "M'3", "M'4", "M'5", "M'6", "M'7",
"M'8", "M'9", "M'10", "M'11", "M'12", "M'13", "M'14", "M'15", "M'16"]
clinresult = 2
elif pat_id == 'la05':
# [2:4 7:15 21:39 42:82 85:89 96:101 103:114 116:121 ...
# 126:145 147:152 154:157 160:161 165:180 182:191];
included_indices = np.concatenate((np.arange(1, 4), np.arange(6, 15),
np.arange(20, 39), np.arange(41, 82), np.arange(84, 89),
np.arange(95, 101), np.arange(102, 114), np.arange(115, 121),
np.arange(125, 145), np.arange(146, 152), np.arange(153, 157),
np.arange(159, 161), np.arange(164, 180), np.arange(181, 191)))
onsetelecs = ["T'1", "T'2", "D'1", "D'2"]
clinresult = 1
elif pat_id == 'la06':
# [1:4 7:12 14:17 19 21:33 37 46:47 50:58 61:62 70:73 77:82 ...
# 84:102 104:112 114:119];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(13, 17), np.arange(18, 19), np.arange(20, 33),
np.arange(36, 37), np.arange(45, 47), np.arange(49, 58),
np.arange(60, 62), np.arange(69, 73), np.arange(76, 82),
np.arange(83, 102), np.arange(103, 112), np.arange(113, 119)))
onsetelecs = ["Q'3", "Q'4", "R'3", "R'4"]
clinresult = 2
elif pat_id == 'la07':
# [1:18 22:23 25 34:37 44 48:51 54:55 57:69 65:66 68:78 ...
# 82:83 85:93 96:107 114:120];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 18), np.arange(21, 23),
np.arange(24, 25), np.arange(33, 37), np.arange(43, 44),
np.arange(47, 51), np.arange(53, 55), np.arange(56, 69),
np.arange(64, 66), np.arange(67, 78), np.arange(81, 83),
np.arange(84, 93), np.arange(95, 107), np.arange(113, 120)))
onsetelecs = ["T'1", "T'3", "R'8", "R'9"]
clinresult = 1
elif pat_id == 'la08':
# [1:2 8:13 15:19 22 25 27:30 34:35 46:48 50:57 ...
# 65:68 70:72 76:78 80:84 87:93 100:102 105:108 110:117 123:127 130:131 133:137 ...
# 140:146]
included_indices = np.concatenate((np.arange(0, 2), np.arange(7, 13),
np.arange(14, 19), np.arange(21, 22), np.arange(24, 25),
np.arange(26, 30), np.arange(33, 35), np.arange(45, 48),
np.arange(49, 57), np.arange(64, 68), np.arange(69, 72),
np.arange(75, 78), np.arange(79, 84), np.arange(86, 93),
np.arange(99, 102), np.arange(104, 108), np.arange(109, 117),
np.arange(122, 127), np.arange(129, 131), np.arange(132, 137),
np.arange(139, 146)))
onsetelecs = ["Q2"]
clinresult = 2
elif pat_id == 'la09':
# [3:4 7:17 21:28 33:38 42:47 51:56 58:62 64:69 ...
# 73:80 82:84 88:92 95:103 107:121 123 126:146 150:161 164:169 179:181 ...
# 183:185 187:191]
# 2/7/18 - got rid of F10 = looking at edf was super noisy
included_indices = np.concatenate((np.arange(2, 3), np.arange(6, 17),
np.arange(20, 28), np.arange(32, 38), np.arange(41, 47),
np.arange(50, 56), np.arange(57, 62), np.arange(63, 66), np.arange(67, 69),
np.arange(72, 80), np.arange(81, 84), np.arange(87, 92),
np.arange(94, 103), np.arange(106, 121), np.arange(122, 123),
np.arange(125, 146), np.arange(149, 161), np.arange(163, 169),
np.arange(178, 181), np.arange(182, 185), np.arange(186, 191)))
onsetelecs = ["X'1", "X'2", "X'3", "X'4", "U'1", "U'2"]
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(20, 39), np.arange(41, 189)))
onsetelecs = ["P'1", "P'2"]
clinresult = 2
elif pat_id == 'la10':
# [1:4 7:13 17:19 23:32 36:37 46:47 50 54:59 62:66 68:79 82:96 ...
# 99:106 108:113 117:127 135:159 163:169 172:173 176:179 181:185];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 13),
np.arange(16, 19), np.arange(22, 32), np.arange(35, 37),
np.arange(45, 47), np.arange(49, 50), np.arange(53, 59),
np.arange(61, 66), np.arange(67, 79), np.arange(81, 96),
np.arange(98, 106), np.arange(107, 113), np.arange(116, 127),
np.arange(134, 159), np.arange(162, 169), np.arange(171, 173),
np.arange(175, 179), np.arange(180, 185)))
from os.path import join
import pickle
import numpy as np
MACHINE_ROOT = '/data3/floraxue/cs294/exp/lsun_pretrained/machine_labels'
accs = []
corrects = []
pos_reads = []
pos_corrects = []
neg_reads = []
neg_corrects = []
print('trial', 'pos_reads', 'neg_reads', 'unsure', 'pos_corrects', 'neg_corrects', 'corrects', 'acc')
for trial in range(1,6):
pos_path = join(MACHINE_ROOT, 'cat_trial_{}_pos.txt'.format(trial))
neg_path = join(MACHINE_ROOT, 'cat_trial_{}_neg.txt'.format(trial))
unsure_path = join(MACHINE_ROOT, 'cat_trial_{}_unsure.txt'.format(trial))
GT_PATH = '/data3/floraxue/cs294/active-rl-data/ground_truth/cat_gt_cached.p'
dic = pickle.load(open(GT_PATH, 'rb'))
correct = 0
pos_correct = 0
neg_correct = 0
with open(pos_path, 'r') as fp:
lines = fp.readlines()
pos_read = len(lines)
for line in lines:
key = line.strip()
target = dic[key]
if target == 1:
correct += 1
pos_correct += 1
with open(neg_path, 'r') as fp:
lines = fp.readlines()
neg_read = len(lines)
for line in lines:
key = line.strip()
target = dic[key]
if target == -1:
correct += 1
neg_correct += 1
with open(unsure_path, 'r') as fp:
lines = fp.readlines()
total = len(lines)
acc = correct / (pos_read + neg_read)
accs.append(acc)
corrects.append(correct)
pos_reads.append(pos_read)
pos_corrects.append(pos_correct)
neg_reads.append(neg_read)
neg_corrects.append(neg_correct)
print(trial, pos_read, neg_read, total, pos_correct,neg_correct, correct, acc)
all_correct = np.sum(corrects)
all_reads = np.sum(pos_reads) + np.sum(neg_reads)
print("Final result")
print("acc", all_correct / all_reads)
print("pos all", np.sum(pos_reads))
print("pos correct", np.sum(pos_corrects))
print("neg all", | np.sum(neg_reads) | numpy.sum |
#NUMBA ############################################
import numba as nb
import numpy as np
import math as m
@nb.njit
def dummy():
return None
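# (dummy() appears to exist only to trigger an initial numba compilation on
# import; it is not used by the fitting routines below.)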
@nb.njit(cache=True,error_model='numpy')
def fit_multiplicative_offset_jitter(x0,f,y,dy):
off=x0[0]
jit=x0[1]
newerr=np.sqrt((dy)**2+jit**2)/off
lnL=-0.5*np.sum(((y/off-f)/(newerr))**2.0+np.log(2.0*np.pi)+np.log(newerr**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_only_multiplicative_offset(x0,f,y,dy):
off=x0
lnL=-0.5*np.sum(((y/off-f)/(dy/off))**2.0+np.log(2.0*np.pi)+np.log((dy/off)**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_linear_offset_jitter(x0,f,y,dy):
off=x0[0]
jit=x0[1]
lnL=-0.5*np.sum(((y-off-f)/(np.sqrt(dy**2+jit**2)))**2.0+np.log(2.0*np.pi)+np.log(dy**2+jit**2))
return -lnL
@nb.njit(cache=True,error_model='numpy')
def fit_only_linear_offset(x0,f,y,dy):
off=x0
lnL=-0.5*np.sum(((y-off-f)/(dy))**2.0+np.log(2.0*np.pi)+np.log(dy**2))
return -lnL
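# Minimal usage sketch (an addition, not part of the original module): each
# fit_* function returns the *negative* log-likelihood, so it can be handed
# straight to an optimiser. scipy and the synthetic data here are assumptions.
if __name__ == "__main__":
    from scipy.optimize import minimize
    rng = np.random.default_rng(0)
    f_model = np.sin(np.linspace(0.0, 6.0, 200))       # noise-free model
    y_obs = 2.0 * f_model + rng.normal(0.0, 0.1, 200)  # data with offset ~2
    dy_obs = np.full(200, 0.1)                         # formal errors
    # fit a multiplicative offset plus an extra jitter term
    res = minimize(fit_multiplicative_offset_jitter, x0=np.array([1.0, 0.05]),
                   args=(f_model, y_obs, dy_obs), method="Nelder-Mead")
    print("offset, jitter:", res.x)  # offset should land near 2.0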
import math
import warnings
from copy import copy, deepcopy
from datetime import datetime
from typing import Mapping, MutableMapping, MutableSequence, Optional
import numpy as np # type: ignore
import pytest # type: ignore
from rads.rpn import (
ABS,
ACOS,
ACOSD,
ACOSH,
ADD,
AND,
ASIN,
ASIND,
ASINH,
ATAN,
ATAN2,
ATAND,
ATANH,
AVG,
BOXCAR,
BTEST,
CEIL,
CEILING,
COS,
COSD,
COSH,
D2R,
DIF,
DIV,
DUP,
DXDY,
EQ,
EXCH,
EXP,
FLOOR,
FMOD,
GAUSS,
GE,
GT,
HYPOT,
IAND,
INRANGE,
INV,
IOR,
ISAN,
ISNAN,
LE,
LOG,
LOG10,
LT,
MAX,
MIN,
MUL,
NAN,
NE,
NEG,
NINT,
OR,
PI,
POP,
POW,
R2,
R2D,
RINT,
SIN,
SIND,
SINH,
SQR,
SQRT,
SUB,
SUM,
TAN,
TAND,
TANH,
YMDHMS,
CompleteExpression,
E,
Expression,
Literal,
StackUnderflowError,
Token,
Variable,
token,
)
from rads.typing import FloatOrArray
GOLDEN_RATIO = math.log((1 + math.sqrt(5)) / 2)
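# Despite the name, this is the natural log of the golden ratio, chosen so the
# hyperbolic tests below get exact values: sinh(GOLDEN_RATIO) == 1/2,
# cosh(GOLDEN_RATIO) == sqrt(5)/2 and tanh(GOLDEN_RATIO) == sqrt(5)/5.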
class TestLiteral:
def test_init(self):
Literal(3)
Literal(3.14)
with pytest.raises(TypeError):
Literal("not a number") # type: ignore
def test_pops(self):
assert Literal(3).pops == 0
def test_puts(self):
assert Literal(3).puts == 1
def test_value(self):
assert Literal(3).value == 3
assert Literal(3.14).value == 3.14
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment: MutableMapping[str, FloatOrArray] = {}
assert Literal(3.14)(stack, environment) is None
assert Literal(2.71)(stack, environment) is None
assert stack == [3.14, 2.71]
assert environment == {}
def test_eq(self):
assert Literal(3.14) == Literal(3.14)
assert not Literal(3.14) == Literal(2.71)
assert not Literal(3.14) == 3.14
def test_ne(self):
assert Literal(3.14) != Literal(2.71)
assert not Literal(3.14) != Literal(3.14)
assert Literal(3.14) != 3.14
def test_lt(self):
assert Literal(2.71) < Literal(3.14)
assert not Literal(3.14) < Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) < 3.14
with pytest.raises(TypeError):
2.71 < Literal(3.14)
def test_le(self):
assert Literal(2.71) <= Literal(3.14)
assert Literal(3.14) <= Literal(3.14)
assert not Literal(3.14) <= Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) <= 3.14
with pytest.raises(TypeError):
2.71 <= Literal(3.14)
def test_gt(self):
assert Literal(3.14) > Literal(2.71)
assert not Literal(2.71) > Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) > 2.71
with pytest.raises(TypeError):
3.14 > Literal(2.71)
def test_ge(self):
assert Literal(3.14) >= Literal(2.71)
assert Literal(3.14) >= Literal(3.14)
assert not Literal(2.71) >= Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) >= 2.71
with pytest.raises(TypeError):
3.14 >= Literal(2.71)
def test_repr(self):
assert repr(Literal(3)) == "Literal(3)"
assert repr(Literal(3.14)) == "Literal(3.14)"
def test_str(self):
assert str(Literal(3)) == "3"
assert str(Literal(3.14)) == "3.14"
def test_pi(self):
assert PI.value == pytest.approx(np.pi)
def test_e(self):
assert E.value == pytest.approx(np.e)
class TestVariable:
def test_init(self):
Variable("alt")
with pytest.raises(ValueError):
Variable("3")
with pytest.raises(ValueError):
Variable("3name")
with pytest.raises(TypeError):
Variable(3) # type: ignore
with pytest.raises(TypeError):
Variable(3.14) # type: ignore
def test_pops(self):
assert Variable("alt").pops == 0
def test_puts(self):
assert Variable("alt").puts == 1
def test_name(self):
assert Variable("alt").name == "alt"
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment = {"alt": np.array([1, 2, 3]), "dry_tropo": 4, "wet_tropo": 5}
assert Variable("wet_tropo")(stack, environment) is None
assert Variable("alt")(stack, environment) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
assert len(environment) == 3
assert "alt" in environment
assert "dry_tropo" in environment
assert "wet_tropo" in environment
assert np.all(environment["alt"] == np.array([1, 2, 3]))
assert environment["dry_tropo"] == 4
assert environment["wet_tropo"] == 5
with pytest.raises(KeyError):
assert Variable("alt")(stack, {}) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
def test_eq(self):
assert Variable("alt") == Variable("alt")
assert not Variable("alt") == Variable("dry_tropo")
assert not Variable("alt") == "alt"
def test_ne(self):
assert Variable("alt") != Variable("dry_tropo")
assert not Variable("alt") != Variable("alt")
assert Variable("alt") != "alt"
def test_repr(self):
assert repr(Variable("alt")) == "Variable('alt')"
def test_str(self):
assert str(Variable("alt")) == "alt"
def contains_array(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
if isinstance(item, np.ndarray):
return True
return False
def contains_nan(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
try:
if math.isnan(item):
return True
except TypeError:
pass
return False
def assert_token(
operator: Token,
pre_stack: MutableSequence[FloatOrArray],
post_stack: MutableSequence[FloatOrArray],
environment: Optional[Mapping[str, FloatOrArray]] = None,
*,
approx: bool = False,
rtol: float = 1e-15,
atol: float = 1e-16,
) -> None:
"""Assert that a token modifies the stack properly.
Parameters
----------
operator
Operator to test.
pre_stack
Stack state before calling the operator.
post_stack
Desired stack state after calling the operator.
environment
Optional dictionary like object providing the environment for
variable lookup.
approx
Set to true to use approximate equality instead of exact.
rtol
Relative tolerance. Only used if :paramref:`approx` is True.
atol
Absolute tolerance. Only used if :paramref:`approx` is True.
Raises
------
AssertionError
If the operator does not produce the proper post stack state or the
environment parameter is changed.
"""
if not environment:
environment = {"dont_touch": 5}
original_environment = deepcopy(environment)
stack = pre_stack
operator(stack, environment)
# environment should be unchanged
assert environment == original_environment
# check stack
if approx or contains_nan(post_stack) or contains_array(post_stack):
assert len(stack) == len(post_stack)
for a, b in zip(stack, post_stack):
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
if approx:
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, equal_nan=True
)
else:
np.testing.assert_equal(a, b)
else:
if math.isnan(b):
assert math.isnan(a)
elif approx:
assert a == pytest.approx(b, rel=rtol, abs=atol)
else:
assert a == b
else:
assert stack == post_stack
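# A typical call (mirroring the tests below) looks like:
#     assert_token(ADD, [1, 2], [3])
# i.e. ADD applied to a stack of [1, 2] must leave exactly [3] behind, without
# modifying the environment.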
class TestSUBOperator:
def test_repr(self):
assert repr(SUB) == "SUB"
def test_pops(self):
assert SUB.pops == 2
def test_puts(self):
assert SUB.puts == 1
def test_no_copy(self):
assert copy(SUB) is SUB
assert deepcopy(SUB) is SUB
def test_call(self):
assert_token(SUB, [2, 4], [-2])
assert_token(SUB, [2, np.array([4, 1])], [np.array([-2, 1])])
assert_token(SUB, [np.array([4, 1]), 2], [np.array([2, -1])])
assert_token(SUB, [np.array([4, 1]), np.array([1, 4])], [np.array([3, -3])])
# extra stack elements
assert_token(SUB, [0, 2, 4], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUB([], {})
with pytest.raises(StackUnderflowError):
SUB([1], {})
class TestADDOperator:
def test_repr(self):
assert repr(ADD) == "ADD"
def test_pops(self):
assert ADD.pops == 2
def test_puts(self):
assert ADD.puts == 1
def test_no_copy(self):
assert copy(ADD) is ADD
assert deepcopy(ADD) is ADD
def test_call(self):
assert_token(ADD, [2, 4], [6])
assert_token(ADD, [2, np.array([4, 1])], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), 2], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), np.array([1, 4])], [np.array([5, 5])])
# extra stack elements
assert_token(ADD, [0, 2, 4], [0, 6])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ADD([], {})
with pytest.raises(StackUnderflowError):
ADD([1], {})
class TestMULOperator:
def test_repr(self):
assert repr(MUL) == "MUL"
def test_pops(self):
assert MUL.pops == 2
def test_puts(self):
assert MUL.puts == 1
def test_no_copy(self):
assert copy(MUL) is MUL
assert deepcopy(MUL) is MUL
def test_call(self):
assert_token(MUL, [2, 4], [8])
assert_token(MUL, [2, np.array([4, 1])], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), 2], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), np.array([1, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(MUL, [0, 2, 4], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MUL([], {})
with pytest.raises(StackUnderflowError):
MUL([1], {})
class TestPOPOperator:
def test_repr(self):
assert repr(POP) == "POP"
def test_pops(self):
assert POP.pops == 1
def test_puts(self):
assert POP.puts == 0
def test_no_copy(self):
assert copy(POP) is POP
assert deepcopy(POP) is POP
def test_call(self):
assert_token(POP, [1], [])
assert_token(POP, [1, 2], [1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POP([], {})
class TestNEGOperator:
def test_repr(self):
assert repr(NEG) == "NEG"
def test_pops(self):
assert NEG.pops == 1
def test_puts(self):
assert NEG.puts == 1
def test_no_copy(self):
assert copy(NEG) is NEG
assert deepcopy(NEG) is NEG
def test_call(self):
assert_token(NEG, [2], [-2])
assert_token(NEG, [-2], [2])
assert_token(NEG, [np.array([4, -1])], [np.array([-4, 1])])
assert_token(NEG, [np.array([-4, 1])], [np.array([4, -1])])
# extra stack elements
assert_token(NEG, [0, 2], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NEG([], {})
class TestABSOperator:
def test_repr(self):
assert repr(ABS) == "ABS"
def test_pops(self):
assert ABS.pops == 1
def test_puts(self):
assert ABS.puts == 1
def test_no_copy(self):
assert copy(ABS) is ABS
assert deepcopy(ABS) is ABS
def test_call(self):
assert_token(ABS, [2], [2])
assert_token(ABS, [-2], [2])
assert_token(ABS, [np.array([4, -1])], [np.array([4, 1])])
assert_token(ABS, [np.array([-4, 1])], [np.array([4, 1])])
# extra stack elements
assert_token(ABS, [0, -2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ABS([], {})
class TestINVOperator:
def test_repr(self):
assert repr(INV) == "INV"
def test_pops(self):
assert INV.pops == 1
def test_puts(self):
assert INV.puts == 1
def test_no_copy(self):
assert copy(INV) is INV
assert deepcopy(INV) is INV
def test_call(self):
assert_token(INV, [2], [0.5])
assert_token(INV, [-2], [-0.5])
assert_token(INV, [np.array([4, -1])], [np.array([0.25, -1])])
assert_token(INV, [np.array([-4, 1])], [np.array([-0.25, 1])])
# extra stack elements
assert_token(INV, [0, 2], [0, 0.5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INV([], {})
class TestSQRTOperator:
def test_repr(self):
assert repr(SQRT) == "SQRT"
def test_pops(self):
assert SQRT.pops == 1
def test_puts(self):
assert SQRT.puts == 1
def test_no_copy(self):
assert copy(SQRT) is SQRT
assert deepcopy(SQRT) is SQRT
def test_call(self):
assert_token(SQRT, [4], [2])
assert_token(SQRT, [np.array([4, 16])], [np.array([2, 4])])
# extra stack elements
assert_token(SQRT, [0, 4], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQRT([], {})
class TestSQROperator:
def test_repr(self):
assert repr(SQR) == "SQR"
def test_pops(self):
assert SQR.pops == 1
def test_puts(self):
assert SQR.puts == 1
def test_no_copy(self):
assert copy(SQR) is SQR
assert deepcopy(SQR) is SQR
def test_call(self):
assert_token(SQR, [2], [4])
assert_token(SQR, [-2], [4])
assert_token(SQR, [np.array([4, -1])], [np.array([16, 1])])
assert_token(SQR, [np.array([-4, 1])], [np.array([16, 1])])
# extra stack elements
assert_token(SQR, [0, -2], [0, 4])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQR([], {})
class TestEXPOperator:
def test_repr(self):
assert repr(EXP) == "EXP"
def test_pops(self):
assert EXP.pops == 1
def test_puts(self):
assert EXP.puts == 1
def test_no_copy(self):
assert copy(EXP) is EXP
assert deepcopy(EXP) is EXP
def test_call(self):
assert_token(EXP, [math.log(1)], [1.0], approx=True)
assert_token(EXP, [math.log(2)], [2.0], approx=True)
assert_token(
EXP, [np.array([np.log(4), np.log(1)])], [np.array([4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(EXP, [0, np.log(1)], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXP([], {})
class TestLOGOperator:
def test_repr(self):
assert repr(LOG) == "LOG"
def test_pops(self):
assert LOG.pops == 1
def test_puts(self):
assert LOG.puts == 1
def test_no_copy(self):
assert copy(LOG) is LOG
assert deepcopy(LOG) is LOG
def test_call(self):
assert_token(LOG, [math.e], [1.0], approx=True)
assert_token(LOG, [math.e ** 2], [2.0], approx=True)
assert_token(LOG, [math.e ** -2], [-2.0], approx=True)
assert_token(
LOG,
[np.array([np.e ** 4, np.e ** -1])],
[np.array([4.0, -1.0])],
approx=True,
)
assert_token(
LOG,
[np.array([np.e ** -4, np.e ** 1])],
[np.array([-4.0, 1.0])],
approx=True,
)
# extra stack elements
assert_token(LOG, [0, np.e], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG([], {})
class TestLOG10Operator:
def test_repr(self):
assert repr(LOG10) == "LOG10"
def test_pops(self):
assert LOG10.pops == 1
def test_puts(self):
assert LOG10.puts == 1
def test_no_copy(self):
assert copy(LOG10) is LOG10
assert deepcopy(LOG10) is LOG10
def test_call(self):
assert_token(LOG10, [10], [1.0], approx=True)
assert_token(LOG10, [10 ** 2], [2.0], approx=True)
assert_token(LOG10, [10 ** -2], [-2.0], approx=True)
assert_token(
LOG10, [np.array([10 ** 4, 10 ** -1])], [np.array([4.0, -1.0])], approx=True
)
assert_token(
LOG10, [np.array([10 ** -4, 10 ** 1])], [np.array([-4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(LOG10, [0, 10], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG10([], {})
class TestSINOperator:
def test_repr(self):
assert repr(SIN) == "SIN"
def test_pops(self):
assert SIN.pops == 1
def test_puts(self):
assert SIN.puts == 1
def test_no_copy(self):
assert copy(SIN) is SIN
assert deepcopy(SIN) is SIN
def test_call(self):
assert_token(SIN, [0.0], [0.0], approx=True)
assert_token(SIN, [math.pi / 6], [1 / 2], approx=True)
assert_token(SIN, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(SIN, [math.pi / 3], [math.sqrt(3) / 2], approx=True)
assert_token(SIN, [math.pi / 2], [1.0], approx=True)
assert_token(
SIN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIN, [0, math.pi / 2], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIN([], {})
class TestCOSOperator:
def test_repr(self):
assert repr(COS) == "COS"
def test_pops(self):
assert COS.pops == 1
def test_puts(self):
assert COS.puts == 1
def test_no_copy(self):
assert copy(COS) is COS
assert deepcopy(COS) is COS
def test_call(self):
assert_token(COS, [0.0], [1.0], approx=True)
assert_token(COS, [math.pi / 6], [math.sqrt(3) / 2], approx=True)
assert_token(COS, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(COS, [math.pi / 3], [1 / 2], approx=True)
assert_token(COS, [math.pi / 2], [0.0], approx=True)
assert_token(
COS,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COS,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COS, [0, math.pi / 2], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COS([], {})
class TestTANOperator:
def test_repr(self):
assert repr(TAN) == "TAN"
def test_pops(self):
assert TAN.pops == 1
def test_puts(self):
assert TAN.puts == 1
def test_no_copy(self):
assert copy(TAN) is TAN
assert deepcopy(TAN) is TAN
def test_call(self):
assert_token(TAN, [0.0], [0.0], approx=True)
assert_token(TAN, [math.pi / 6], [1 / math.sqrt(3)], approx=True)
assert_token(TAN, [math.pi / 4], [1.0], approx=True)
assert_token(TAN, [math.pi / 3], [math.sqrt(3)], approx=True)
assert_token(
TAN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAN, [0, math.pi / 4], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAN([], {})
class TestSINDOperator:
def test_repr(self):
assert repr(SIND) == "SIND"
def test_pops(self):
assert SIND.pops == 1
def test_puts(self):
assert SIND.puts == 1
def test_no_copy(self):
assert copy(SIND) is SIND
assert deepcopy(SIND) is SIND
def test_call(self):
assert_token(SIND, [0], [0.0], approx=True)
assert_token(SIND, [30], [1 / 2], approx=True)
assert_token(SIND, [45], [1 / math.sqrt(2)], approx=True)
assert_token(SIND, [60], [math.sqrt(3) / 2], approx=True)
assert_token(SIND, [90], [1.0], approx=True)
assert_token(
SIND,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIND,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIND, [0, 90], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIND([], {})
class TestCOSDOperator:
def test_repr(self):
assert repr(COSD) == "COSD"
def test_pops(self):
assert COSD.pops == 1
def test_puts(self):
assert COSD.puts == 1
def test_no_copy(self):
assert copy(COSD) is COSD
assert deepcopy(COSD) is COSD
def test_call(self):
assert_token(COSD, [0], [1.0], approx=True)
assert_token(COSD, [30], [math.sqrt(3) / 2], approx=True)
assert_token(COSD, [45], [1 / math.sqrt(2)], approx=True)
assert_token(COSD, [60], [1 / 2], approx=True)
assert_token(COSD, [90], [0.0], approx=True)
assert_token(
COSD,
[np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COSD,
[-np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COSD, [0, 90], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSD([], {})
class TestTANDOperator:
def test_repr(self):
assert repr(TAND) == "TAND"
def test_pops(self):
assert TAND.pops == 1
def test_puts(self):
assert TAND.puts == 1
def test_no_copy(self):
assert copy(TAND) is TAND
assert deepcopy(TAND) is TAND
def test_call(self):
assert_token(TAND, [0], [0], approx=True)
assert_token(TAND, [30], [1 / math.sqrt(3)], approx=True)
assert_token(TAND, [45], [1.0], approx=True)
assert_token(TAND, [60], [math.sqrt(3)], approx=True)
assert_token(
TAND,
[np.array([0, 30, 45, 60])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAND,
[-np.array([0, 30, 45, 60])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAND, [0, 45], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAND([], {})
class TestSINHOperator:
def test_repr(self):
assert repr(SINH) == "SINH"
def test_pops(self):
assert SINH.pops == 1
def test_puts(self):
assert SINH.puts == 1
def test_no_copy(self):
assert copy(SINH) is SINH
assert deepcopy(SINH) is SINH
def test_call(self):
assert_token(SINH, [0.0], [0.0], approx=True)
assert_token(SINH, [GOLDEN_RATIO], [0.5], approx=True)
assert_token(
SINH, [np.array([0.0, GOLDEN_RATIO])], [np.array([0.0, 0.5])], approx=True
)
# extra stack elements
assert_token(SINH, [0, GOLDEN_RATIO], [0, 0.5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SINH([], {})
class TestCOSHOperator:
def test_repr(self):
assert repr(COSH) == "COSH"
def test_pops(self):
assert COSH.pops == 1
def test_puts(self):
assert COSH.puts == 1
def test_no_copy(self):
assert copy(COSH) is COSH
assert deepcopy(COSH) is COSH
def test_call(self):
assert_token(COSH, [0.0], [1.0], approx=True)
assert_token(COSH, [GOLDEN_RATIO], [math.sqrt(5) / 2], approx=True)
assert_token(
COSH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([1.0, np.sqrt(5) / 2])],
approx=True,
)
# extra stack elements
assert_token(COSH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSH([], {})
class TestTANHOperator:
def test_repr(self):
assert repr(TANH) == "TANH"
def test_pops(self):
assert TANH.pops == 1
def test_puts(self):
assert TANH.puts == 1
def test_no_copy(self):
assert copy(TANH) is TANH
assert deepcopy(TANH) is TANH
def test_call(self):
assert_token(TANH, [0.0], [0.0], approx=True)
assert_token(TANH, [GOLDEN_RATIO], [math.sqrt(5) / 5], approx=True)
assert_token(
TANH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([0.0, np.sqrt(5) / 5])],
approx=True,
)
# extra stack elements
assert_token(TANH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TANH([], {})
class TestASINOperator:
def test_repr(self):
assert repr(ASIN) == "ASIN"
def test_pops(self):
assert ASIN.pops == 1
def test_puts(self):
assert ASIN.puts == 1
def test_no_copy(self):
assert copy(ASIN) is ASIN
assert deepcopy(ASIN) is ASIN
def test_call(self):
assert_token(ASIN, [0.0], [0.0], approx=True)
assert_token(ASIN, [1 / 2], [math.pi / 6], approx=True)
assert_token(ASIN, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ASIN, [math.sqrt(3) / 2], [math.pi / 3], approx=True)
assert_token(ASIN, [1.0], [math.pi / 2], approx=True)
assert_token(
ASIN,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
ASIN,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ASIN, [0, 1.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIN([], {})
class TestACOSOperator:
def test_repr(self):
assert repr(ACOS) == "ACOS"
def test_pops(self):
assert ACOS.pops == 1
def test_puts(self):
assert ACOS.puts == 1
def test_no_copy(self):
assert copy(ACOS) is ACOS
assert deepcopy(ACOS) is ACOS
def test_call(self):
assert_token(ACOS, [1.0], [0.0], approx=True)
assert_token(ACOS, [math.sqrt(3) / 2], [math.pi / 6], approx=True)
assert_token(ACOS, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ACOS, [1 / 2], [math.pi / 3], approx=True)
assert_token(ACOS, [0.0], [math.pi / 2], approx=True)
assert_token(
ACOS,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ACOS, [0, 0.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOS([], {})
class TestATANOperator:
def test_repr(self):
assert repr(ATAN) == "ATAN"
def test_pops(self):
assert ATAN.pops == 1
def test_puts(self):
assert ATAN.puts == 1
def test_no_copy(self):
assert copy(ATAN) is ATAN
assert deepcopy(ATAN) is ATAN
def test_call(self):
assert_token(ATAN, [0.0], [0.0], approx=True)
assert_token(ATAN, [1 / math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN, [1.0], [math.pi / 4], approx=True)
assert_token(ATAN, [math.sqrt(3)], [math.pi / 3], approx=True)
assert_token(
ATAN,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
assert_token(
ATAN,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
# extra stack elements
assert_token(ATAN, [0, 1.0], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN([], {})
class TestASINDOperator:
def test_repr(self):
assert repr(ASIND) == "ASIND"
def test_pops(self):
assert ASIND.pops == 1
def test_puts(self):
assert ASIND.puts == 1
def test_no_copy(self):
assert copy(ASIND) is ASIND
assert deepcopy(ASIND) is ASIND
def test_call(self):
assert_token(ASIND, [0.0], [0], approx=True)
assert_token(ASIND, [1 / 2], [30], approx=True)
assert_token(ASIND, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ASIND, [math.sqrt(3) / 2], [60], approx=True)
assert_token(ASIND, [1.0], [90], approx=True)
assert_token(
ASIND,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
ASIND,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ASIND, [0, 1.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIND([], {})
class TestACOSDOperator:
def test_repr(self):
assert repr(ACOSD) == "ACOSD"
def test_pops(self):
assert ACOSD.pops == 1
def test_puts(self):
assert ACOSD.puts == 1
def test_no_copy(self):
assert copy(ACOSD) is ACOSD
assert deepcopy(ACOSD) is ACOSD
def test_call(self):
assert_token(ACOSD, [1.0], [0], approx=True)
assert_token(ACOSD, [math.sqrt(3) / 2], [30], approx=True)
assert_token(ACOSD, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ACOSD, [1 / 2], [60], approx=True)
assert_token(ACOSD, [0.0], [90], approx=True)
assert_token(
ACOSD,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ACOSD, [0, 0.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSD([], {})
class TestATANDOperator:
def test_repr(self):
assert repr(ATAND) == "ATAND"
def test_pops(self):
assert ATAND.pops == 1
def test_puts(self):
assert ATAND.puts == 1
def test_no_copy(self):
assert copy(ATAND) is ATAND
assert deepcopy(ATAND) is ATAND
def test_call(self):
assert_token(ATAND, [0.0], [0], approx=True)
assert_token(ATAND, [1 / math.sqrt(3)], [30], approx=True)
assert_token(ATAND, [1.0], [45], approx=True)
assert_token(ATAND, [math.sqrt(3)], [60], approx=True)
assert_token(
ATAND,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0, 30, 45, 60])],
approx=True,
)
assert_token(
ATAND,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0, 30, 45, 60])],
approx=True,
)
# extra stack elements
assert_token(ATAND, [0, 1.0], [0, 45], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAND([], {})
class TestASINHOperator:
def test_repr(self):
assert repr(ASINH) == "ASINH"
def test_pops(self):
assert ASINH.pops == 1
def test_puts(self):
assert ASINH.puts == 1
def test_no_copy(self):
assert copy(ASINH) is ASINH
assert deepcopy(ASINH) is ASINH
def test_call(self):
assert_token(ASINH, [0.0], [0.0], approx=True)
assert_token(ASINH, [0.5], [GOLDEN_RATIO], approx=True)
assert_token(
ASINH, [np.array([0.0, 0.5])], [np.array([0.0, GOLDEN_RATIO])], approx=True
)
# extra stack elements
assert_token(ASINH, [0, 0.5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASINH([], {})
class TestACOSHOperator:
def test_repr(self):
assert repr(ACOSH) == "ACOSH"
def test_pops(self):
assert ACOSH.pops == 1
def test_puts(self):
assert ACOSH.puts == 1
def test_no_copy(self):
assert copy(ACOSH) is ACOSH
assert deepcopy(ACOSH) is ACOSH
def test_call(self):
assert_token(ACOSH, [1.0], [0.0], approx=True)
assert_token(ACOSH, [math.sqrt(5) / 2], [GOLDEN_RATIO], approx=True)
assert_token(
ACOSH,
[np.array([1.0, np.sqrt(5) / 2])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ACOSH, [0, math.sqrt(5) / 2], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSH([], {})
class TestATANHOperator:
def test_repr(self):
assert repr(ATANH) == "ATANH"
def test_pops(self):
assert ATANH.pops == 1
def test_puts(self):
assert ATANH.puts == 1
def test_no_copy(self):
assert copy(ATANH) is ATANH
assert deepcopy(ATANH) is ATANH
def test_call(self):
assert_token(ATANH, [0.0], [0.0], approx=True)
assert_token(ATANH, [math.sqrt(5) / 5], [GOLDEN_RATIO], approx=True)
assert_token(
ATANH,
[np.array([0.0, np.sqrt(5) / 5])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ATANH, [0, math.sqrt(5) / 5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATANH([], {})
class TestISNANOperator:
def test_repr(self):
assert repr(ISNAN) == "ISNAN"
def test_pops(self):
assert ISNAN.pops == 1
def test_puts(self):
assert ISNAN.puts == 1
def test_no_copy(self):
assert copy(ISNAN) is ISNAN
assert deepcopy(ISNAN) is ISNAN
def test_call(self):
assert_token(ISNAN, [2], [False])
assert_token(ISNAN, [float("nan")], [True])
assert_token(ISNAN, [np.array([4, np.nan])], [np.array([False, True])])
assert_token(ISNAN, [np.array([np.nan, 1])], [np.array([True, False])])
# extra stack elements
assert_token(ISNAN, [0, float("nan")], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISNAN([], {})
class TestISANOperator:
def test_repr(self):
assert repr(ISAN) == "ISAN"
def test_pops(self):
assert ISAN.pops == 1
def test_puts(self):
assert ISAN.puts == 1
def test_no_copy(self):
assert copy(ISAN) is ISAN
assert deepcopy(ISAN) is ISAN
def test_call(self):
assert_token(ISAN, [2], [True])
assert_token(ISAN, [float("nan")], [False])
assert_token(ISAN, [np.array([4, np.nan])], [np.array([True, False])])
assert_token(ISAN, [np.array([np.nan, 1])], [np.array([False, True])])
# extra stack elements
assert_token(ISAN, [0, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISAN([], {})
class TestRINTOperator:
def test_repr(self):
assert repr(RINT) == "RINT"
def test_pops(self):
assert RINT.pops == 1
def test_puts(self):
assert RINT.puts == 1
def test_no_copy(self):
assert copy(RINT) is RINT
assert deepcopy(RINT) is RINT
def test_call(self):
assert_token(RINT, [1.6], [2])
assert_token(RINT, [2.4], [2])
assert_token(RINT, [-1.6], [-2])
assert_token(RINT, [-2.4], [-2])
assert_token(RINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(RINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(RINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
RINT([], {})
class TestNINTOperator:
def test_repr(self):
assert repr(NINT) == "NINT"
def test_pops(self):
assert NINT.pops == 1
def test_puts(self):
assert NINT.puts == 1
def test_no_copy(self):
assert copy(NINT) is NINT
assert deepcopy(NINT) is NINT
def test_call(self):
assert_token(NINT, [1.6], [2])
assert_token(NINT, [2.4], [2])
assert_token(NINT, [-1.6], [-2])
assert_token(NINT, [-2.4], [-2])
assert_token(NINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(NINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(NINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NINT([], {})
class TestCEILOperator:
def test_repr(self):
assert repr(CEIL) == "CEIL"
def test_pops(self):
assert CEIL.pops == 1
def test_puts(self):
assert CEIL.puts == 1
def test_no_copy(self):
assert copy(CEIL) is CEIL
assert deepcopy(CEIL) is CEIL
def test_call(self):
assert_token(CEIL, [1.6], [2])
assert_token(CEIL, [2.4], [3])
assert_token(CEIL, [-1.6], [-1])
assert_token(CEIL, [-2.4], [-2])
assert_token(CEIL, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEIL, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
"""Module that contains the command line app."""
import builtins
import click
import inspect
import logging
import matplotlib.pyplot as plt
import numpy as np
import warnings
import yaml
from os import path, remove
from pathlib import Path
from . import _cfg, cache_tools, global_params, plotting
from . import wrapper as lib
def _get_config(config=None):
if config is None:
config = path.expanduser(path.join("~", ".21cmfast", "runconfig_example.yml"))
with open(config) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
return cfg
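# The loaded YAML is expected to look roughly like this (illustrative only):
#   user_params: {HII_DIM: 64, BOX_LEN: 128.0}
#   cosmo_params: {SIGMA_8: 0.81}
# i.e. one top-level mapping per parameter struct consumed by the commands.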
def _ctx_to_dct(args):
dct = {}
j = 0
while j < len(args):
arg = args[j]
if "=" in arg:
a = arg.split("=")
dct[a[0].replace("--", "")] = a[-1]
j += 1
else:
dct[arg.replace("--", "")] = args[j + 1]
j += 2
return dct
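# e.g. (illustrative): _ctx_to_dct(["--HII_DIM=64", "--BOX_LEN", "128"])
# returns {"HII_DIM": "64", "BOX_LEN": "128"} -- both "--key=value" and
# "--key value" forms are accepted, with values kept as strings.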
def _update(obj, ctx):
# Try to use the extra arguments as an override of config.
kk = list(ctx.keys())
for k in kk:
# noinspection PyProtectedMember
if hasattr(obj, k):
try:
val = getattr(obj, "_" + k)
setattr(obj, "_" + k, type(val)(ctx[k]))
ctx.pop(k)
except (AttributeError, TypeError):
try:
val = getattr(obj, k)
setattr(obj, k, type(val)(ctx[k]))
ctx.pop(k)
except AttributeError:
pass
def _override(ctx, *param_dicts):
# Try to use the extra arguments as an override of config.
if ctx.args:
ctx = _ctx_to_dct(ctx.args)
for p in param_dicts:
_update(p, ctx)
# Also update globals, always.
_update(global_params, ctx)
if ctx:
warnings.warn("The following arguments were not able to be set: %s" % ctx)
main = click.Group()
@main.command(
context_settings={ # Doing this allows arbitrary options to override config
"ignore_unknown_options": True,
"allow_extra_args": True,
}
)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
default=None,
help="Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)",
)
@click.option(
"--regen/--no-regen",
default=False,
help="Whether to force regeneration of init/perturb files if they already exist.",
)
@click.option(
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="directory to write data and plots to -- must exist.",
)
@click.option(
"--seed",
type=int,
default=None,
help="specify a random seed for the initial conditions",
)
@click.pass_context
def init(ctx, config, regen, direc, seed):
"""Run a single iteration of 21cmFAST init, saving results to file.
Parameters
----------
ctx :
A parameter from the parent CLI function to be able to override config.
config : str
Path to the configuration file.
regen : bool
Whether to regenerate all data, even if found in cache.
direc : str
Where to search for cached items.
seed : int
Random seed used to generate data.
"""
cfg = _get_config(config)
# Set user/cosmo params from config.
user_params = lib.UserParams(**cfg.get("user_params", {}))
cosmo_params = lib.CosmoParams(**cfg.get("cosmo_params", {}))
_override(ctx, user_params, cosmo_params)
lib.initial_conditions(
user_params=user_params,
cosmo_params=cosmo_params,
regenerate=regen,
write=True,
direc=direc,
random_seed=seed,
)
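# Example invocation (assuming the installed console script is named
# "21cmfast"):
#     21cmfast init --regen --seed 42 --HII_DIM 64
# where --HII_DIM is an arbitrary extra option routed through _override.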
@main.command(
context_settings={ # Doing this allows arbitrary options to override config
"ignore_unknown_options": True,
"allow_extra_args": True,
}
)
@click.argument("redshift", type=float)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
default=None,
help="Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)",
)
@click.option(
"--regen/--no-regen",
default=False,
help="Whether to force regeneration of init/perturb files if they already exist.",
)
@click.option(
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="directory to write data and plots to -- must exist.",
)
@click.option(
"--seed",
type=int,
default=None,
help="specify a random seed for the initial conditions",
)
@click.pass_context
def perturb(ctx, redshift, config, regen, direc, seed):
"""Run 21cmFAST perturb_field at the specified redshift, saving results to file.
Parameters
----------
ctx :
A parameter from the parent CLI function to be able to override config.
redshift : float
Redshift at which to generate perturbed field.
config : str
Path to the configuration file.
regen : bool
Whether to regenerate all data, even if found in cache.
direc : str
Where to search for cached items.
seed : int
Random seed used to generate data.
"""
cfg = _get_config(config)
# Set user/cosmo params from config.
user_params = lib.UserParams(**cfg.get("user_params", {}))
cosmo_params = lib.CosmoParams(**cfg.get("cosmo_params", {}))
_override(ctx, user_params, cosmo_params)
lib.perturb_field(
redshift=redshift,
user_params=user_params,
cosmo_params=cosmo_params,
regenerate=regen,
write=True,
direc=direc,
random_seed=seed,
)
@main.command(
context_settings={ # Doing this allows arbitrary options to override config
"ignore_unknown_options": True,
"allow_extra_args": True,
}
)
@click.argument("redshift", type=float)
@click.option(
"-p",
"--prev_z",
type=float,
default=None,
help="Previous redshift (the spin temperature data must already exist for this redshift)",
)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
default=None,
help="Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)",
)
@click.option(
"--regen/--no-regen",
default=False,
help="Whether to force regeneration of init/perturb files if they already exist.",
)
@click.option(
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="directory to write data and plots to -- must exist.",
)
@click.option(
"--seed",
type=int,
default=None,
help="specify a random seed for the initial conditions",
)
@click.pass_context
def spin(ctx, redshift, prev_z, config, regen, direc, seed):
"""Run spin_temperature at the specified redshift, saving results to file.
Parameters
----------
ctx :
A parameter from the parent CLI function to be able to override config.
redshift : float
The redshift to generate the field at.
prev_z : float
The redshift of a previous box from which to evolve to the current one.
config : str
Path to the configuration file.
regen : bool
Whether to regenerate all data, even if found in cache.
direc : str
Where to search for cached items.
seed : int
Random seed used to generate data.
"""
cfg = _get_config(config)
# Set user/cosmo params from config.
user_params = lib.UserParams(**cfg.get("user_params", {}))
cosmo_params = lib.CosmoParams(**cfg.get("cosmo_params", {}))
flag_options = lib.FlagOptions(
**cfg.get("flag_options", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES
)
astro_params = lib.AstroParams(
**cfg.get("astro_params", {}), INHOMO_RECO=flag_options.INHOMO_RECO
)
_override(ctx, user_params, cosmo_params, astro_params, flag_options)
lib.spin_temperature(
redshift=redshift,
astro_params=astro_params,
flag_options=flag_options,
previous_spin_temp=prev_z,
user_params=user_params,
cosmo_params=cosmo_params,
regenerate=regen,
write=True,
direc=direc,
random_seed=seed,
)
@main.command(
context_settings={ # Doing this allows arbitrary options to override config
"ignore_unknown_options": True,
"allow_extra_args": True,
}
)
@click.argument("redshift", type=float)
@click.option(
"-p",
"--prev_z",
type=float,
default=None,
help="Previous redshift (the ionized box data must already exist for this redshift)",
)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
default=None,
help="Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)",
)
@click.option(
"--regen/--no-regen",
default=False,
help="Whether to force regeneration of init/perturb files if they already exist.",
)
@click.option(
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="directory to write data and plots to -- must exist.",
)
@click.option(
"--seed",
type=int,
default=None,
help="specify a random seed for the initial conditions",
)
@click.pass_context
def ionize(ctx, redshift, prev_z, config, regen, direc, seed):
"""Run 21cmFAST ionize_box at the specified redshift, saving results to file.
Parameters
----------
ctx :
A parameter from the parent CLI function to be able to override config.
redshift : float
The redshift to generate the field at.
prev_z : float
The redshift of a previous box from which to evolve to the current one.
config : str
Path to the configuration file.
regen : bool
Whether to regenerate all data, even if found in cache.
direc : str
Where to search for cached items.
seed : int
Random seed used to generate data.
"""
cfg = _get_config(config)
# Set user/cosmo params from config.
user_params = lib.UserParams(**cfg.get("user_params", {}))
cosmo_params = lib.CosmoParams(**cfg.get("cosmo_params", {}))
flag_options = lib.FlagOptions(
**cfg.get("flag_options", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES
)
astro_params = lib.AstroParams(
**cfg.get("astro_params", {}), INHOMO_RECO=flag_options.INHOMO_RECO
)
_override(ctx, user_params, cosmo_params, astro_params, flag_options)
lib.ionize_box(
redshift=redshift,
astro_params=astro_params,
flag_options=flag_options,
previous_ionize_box=prev_z,
user_params=user_params,
cosmo_params=cosmo_params,
regenerate=regen,
write=True,
direc=direc,
random_seed=seed,
)
@main.command(
context_settings={ # Doing this allows arbitrary options to override config
"ignore_unknown_options": True,
"allow_extra_args": True,
}
)
@click.argument("redshift", type=str)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
default=None,
help="Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)",
)
@click.option(
"--out",
type=click.Path(dir_okay=True, file_okay=True),
default=None,
help="Path to output full Coeval simulation to (directory OK).",
)
@click.option(
"--regen/--no-regen",
default=False,
help="Whether to force regeneration of init/perturb files if they already exist.",
)
@click.option(
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="cache directory",
)
@click.option(
"--seed",
type=int,
default=None,
help="specify a random seed for the initial conditions",
)
@click.pass_context
def coeval(ctx, redshift, config, out, regen, direc, seed):
"""Efficiently generate coeval cubes at a given redshift.
Parameters
----------
ctx :
A parameter from the parent CLI function to be able to override config.
    redshift : str
        Comma-separated list of redshifts at which to generate the coeval cubes.
    config : str
        Path to the configuration file.
    out : str
        Path to which to save the full Coeval simulation (a directory is also accepted).
regen : bool
Whether to regenerate all data, even if found in cache.
direc : str
Where to search for cached items.
seed : int
Random seed used to generate data.
"""
if out is not None:
out = Path(out).absolute()
if len(out.suffix) not in (2, 3) and not out.exists():
out.mkdir()
elif not out.parent.exists():
out.parent.mkdir()
    try:
        redshift = [float(z.strip()) for z in redshift.split(",")]
    except (AttributeError, ValueError):
        raise TypeError("redshift argument must be a comma-separated list of floats.")
cfg = _get_config(config)
# Set user/cosmo params from config.
user_params = lib.UserParams(**cfg.get("user_params", {}))
cosmo_params = lib.CosmoParams(**cfg.get("cosmo_params", {}))
flag_options = lib.FlagOptions(
**cfg.get("flag_options", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES
)
astro_params = lib.AstroParams(
**cfg.get("astro_params", {}), INHOMO_RECO=flag_options.INHOMO_RECO
)
_override(ctx, user_params, cosmo_params, astro_params, flag_options)
coeval = lib.run_coeval(
redshift=redshift,
astro_params=astro_params,
flag_options=flag_options,
user_params=user_params,
cosmo_params=cosmo_params,
regenerate=regen,
write=True,
direc=direc,
random_seed=seed,
)
    if out:
        for z, c in zip(redshift, coeval):
            if out.is_dir():
                fname = out / c.get_unique_filename()
            elif len(redshift) == 1:
                fname = out
            else:
                fname = out.parent / f"{out.name}_z{z}{out.suffix}"
            c.save(fname)
            print(f"Saved Coeval box to {fname}.")
@main.command(
context_settings={ # Doing this allows arbitrary options to override config
"ignore_unknown_options": True,
"allow_extra_args": True,
}
)
@click.argument("redshift", type=float)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
default=None,
help="Path to the configuration file (default ~/.21cmfast/runconfig_single.yml)",
)
@click.option(
"--out",
type=click.Path(dir_okay=True, file_okay=True),
default=None,
help="Path to output full Lightcone to (directory OK).",
)
@click.option(
"--regen/--no-regen",
default=False,
help="Whether to force regeneration of init/perturb files if they already exist.",
)
@click.option(
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="directory to write data and plots to -- must exist.",
)
@click.option(
"-X",
"--max-z",
type=float,
default=None,
help="maximum redshift of the stored lightcone data",
)
@click.option(
"--seed",
type=int,
default=None,
help="specify a random seed for the initial conditions",
)
@click.pass_context
def lightcone(ctx, redshift, config, out, regen, direc, max_z, seed):
"""Efficiently generate coeval cubes at a given redshift.
Parameters
----------
ctx :
A parameter from the parent CLI function to be able to override config.
redshift : float
The redshift to generate the field at.
    config : str
        Path to the configuration file.
    out : str
        Path to which to save the full Lightcone (a directory is also accepted).
regen : bool
Whether to regenerate all data, even if found in cache.
direc : str
Where to search for cached items.
max_z : float
Maximum redshift to include in the produced lightcone.
seed : int
Random seed used to generate data.
"""
cfg = _get_config(config)
if out is not None:
out = Path(out).absolute()
if len(out.suffix) not in (2, 3) and not out.exists():
out.mkdir()
elif not out.parent.exists():
out.parent.mkdir()
# Set user/cosmo params from config.
user_params = lib.UserParams(**cfg.get("user_params", {}))
cosmo_params = lib.CosmoParams(**cfg.get("cosmo_params", {}))
flag_options = lib.FlagOptions(
**cfg.get("flag_options", {}), USE_VELS_AUX=user_params.USE_RELATIVE_VELOCITIES
)
astro_params = lib.AstroParams(
**cfg.get("astro_params", {}), INHOMO_RECO=flag_options.INHOMO_RECO
)
_override(ctx, user_params, cosmo_params, astro_params, flag_options)
lc = lib.run_lightcone(
redshift=redshift,
max_redshift=max_z,
astro_params=astro_params,
flag_options=flag_options,
user_params=user_params,
cosmo_params=cosmo_params,
regenerate=regen,
write=True,
direc=direc,
random_seed=seed,
)
if out:
fname = out / lc.get_unique_filename() if out.is_dir() else out
lc.save(fname)
print(f"Saved Lightcone to {fname}.")
def _query(direc=None, kind=None, md5=None, seed=None, clear=False):
cls = list(
cache_tools.query_cache(direc=direc, kind=kind, hsh=md5, seed=seed, show=False)
)
if not clear:
print("%s Data Sets Found:" % len(cls))
print("------------------")
else:
print("Removing %s data sets..." % len(cls))
for file, c in cls:
if not clear:
print(" @ {%s}:" % file)
print(" %s" % str(c))
print()
else:
direc = direc or path.expanduser(_cfg.config["direc"])
remove(path.join(direc, file))
@main.command()
@click.option(
"-d",
"--direc",
type=click.Path(exists=True, dir_okay=True),
default=None,
help="directory to write data and plots to -- must exist.",
)
@click.option("-k", "--kind", type=str, default=None, help="filter by kind of data.")
@click.option("-m", "--md5", type=str, default=None, help="filter by md5 hsh")
@click.option("-s", "--seed", type=str, default=None, help="filter by random seed")
@click.option(
"--clear/--no-clear",
default=False,
help="remove all data sets returned by this query.",
)
def query(direc, kind, md5, seed, clear):
"""Query the cache database.
Parameters
----------
direc : str
Directory in which to search for cache items
    kind : str
        Filter output by kind of box (e.g. InitialConditions).
    md5 : str
        Filter output by md5 hash.
    seed : str
        Filter output by random seed.
clear : bool
Remove all data sets returned by the query.
"""
_query(direc, kind, md5, seed, clear)
@main.command()
@click.argument("param", type=str)
@click.argument("value", type=str)
@click.option(
"-s",
"--struct",
type=click.Choice(["flag_options", "cosmo_params", "user_params", "astro_params"]),
default="flag_options",
help="struct in which the new feature exists",
)
@click.option(
"-t",
"--vtype",
type=click.Choice(["bool", "float", "int"]),
default="bool",
help="type of the new parameter",
)
@click.option(
"-l/-c",
"--lightcone/--coeval",
default=True,
help="whether to use a lightcone for comparison",
)
@click.option(
"-z", "--redshift", type=float, default=6.0, help="redshift of the comparison boxes"
)
@click.option(
"-Z",
"--max-redshift",
type=float,
default=30,
help="maximum redshift of the comparison lightcone",
)
@click.option("-r", "--random-seed", type=int, default=12345, help="random seed to use")
@click.option("-v", "--verbose", count=True)
@click.option(
"-g/-G",
"--regenerate/--cache",
default=True,
help="whether to regenerate the boxes",
)
def pr_feature(
param,
value,
struct,
vtype,
lightcone,
redshift,
max_redshift,
random_seed,
verbose,
regenerate,
):
"""
Create standard plots comparing a default simulation against a simulation with a new feature.
The new feature is switched on by setting PARAM to VALUE.
Plots are saved in the current directory, with the prefix "pr_feature".
Parameters
----------
param : str
Name of the parameter to modify to "switch on" the feature.
    value : str
        Value to which to set it (parsed to `vtype`).
struct : str
The input parameter struct to which `param` belongs.
vtype : str
Type of the new parameter.
lightcone : bool
Whether the comparison should be done on a lightcone.
redshift : float
Redshift of comparison.
max_redshift : float
If using a lightcone, the maximum redshift in the lightcone to compare.
random_seed : int
Random seed at which to compare.
verbose : int
How verbose the output should be.
regenerate : bool
Whether to regenerate all data, even if it is in cache.
"""
import powerbox
lvl = [logging.WARNING, logging.INFO, logging.DEBUG][verbose]
logger = logging.getLogger("21cmFAST")
logger.setLevel(lvl)
value = getattr(builtins, vtype)(value)
structs = {
"user_params": {"HII_DIM": 128, "BOX_LEN": 250},
"flag_options": {"USE_TS_FLUCT": True},
"cosmo_params": {},
"astro_params": {},
}
if lightcone:
print("Running default lightcone...")
lc_default = lib.run_lightcone(
redshift=redshift,
max_redshift=max_redshift,
random_seed=random_seed,
regenerate=regenerate,
**structs,
)
structs[struct][param] = value
print("Running lightcone with new feature...")
lc_new = lib.run_lightcone(
redshift=redshift,
max_redshift=max_redshift,
random_seed=random_seed,
regenerate=regenerate,
**structs,
)
print("Plotting lightcone slices...")
for field in ["brightness_temp"]:
fig, ax = plt.subplots(3, 1, sharex=True, sharey=True)
vmin = -150
vmax = 30
plotting.lightcone_sliceplot(
lc_default, ax=ax[0], fig=fig, vmin=vmin, vmax=vmax
)
ax[0].set_title("Default")
plotting.lightcone_sliceplot(
lc_new, ax=ax[1], fig=fig, cbar=False, vmin=vmin, vmax=vmax
)
ax[1].set_title("New")
plotting.lightcone_sliceplot(
lc_default, lightcone2=lc_new, cmap="bwr", ax=ax[2], fig=fig
)
ax[2].set_title("Difference")
plt.savefig(f"pr_feature_lighcone_2d_{field}.pdf")
def rms(x, axis=None):
return np.sqrt(np.mean(x ** 2, axis=axis))
print("Plotting lightcone history...")
fig, ax = plt.subplots(4, 1, sharex=True, gridspec_kw={"hspace": 0.05})
ax[0].plot(lc_default.node_redshifts, lc_default.global_xHI, label="Default")
ax[0].plot(lc_new.node_redshifts, lc_new.global_xHI, label="New")
ax[0].set_ylabel(r"$x_{\rm HI}$")
ax[0].legend()
ax[1].plot(
lc_default.node_redshifts,
lc_default.global_brightness_temp,
label="Default",
)
ax[1].plot(lc_new.node_redshifts, lc_new.global_brightness_temp, label="New")
ax[1].set_ylabel("$T_b$ [K]")
ax[3].set_xlabel("z")
rms_diff = rms(lc_default.brightness_temp, axis=(0, 1)) - rms(
lc_new.brightness_temp, axis=(0, 1)
)
ax[2].plot(lc_default.lightcone_redshifts, rms_diff, label="RMS")
ax[2].plot(
lc_new.node_redshifts,
lc_default.global_xHI - lc_new.global_xHI,
label="$x_{HI}$",
)
ax[2].plot(
lc_new.node_redshifts,
lc_default.global_brightness_temp - lc_new.global_brightness_temp,
label="$T_b$",
)
ax[2].legend()
ax[2].set_ylabel("Differences")
diff_rms = rms(lc_default.brightness_temp - lc_new.brightness_temp, axis=(0, 1))
ax[3].plot(lc_default.lightcone_redshifts, diff_rms)
ax[3].set_ylabel("RMS of Diff.")
plt.savefig("pr_feature_history.pdf")
print("Plotting power spectra history...")
p_default = []
p_new = []
z = []
thickness = 200 # Mpc
ncells = int(thickness / lc_new.cell_size)
chunk_size = lc_new.cell_size * ncells
start = 0
print(ncells)
while start + ncells <= lc_new.shape[-1]:
pd, k = powerbox.get_power(
lc_default.brightness_temp[:, :, start : start + ncells],
lc_default.lightcone_dimensions[:2] + (chunk_size,),
)
p_default.append(pd)
pn, k = powerbox.get_power(
lc_new.brightness_temp[:, :, start : start + ncells],
lc_new.lightcone_dimensions[:2] + (chunk_size,),
)
p_new.append(pn)
z.append(lc_new.lightcone_redshifts[start])
start += ncells
        p_default = np.array(p_default)
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
# pylint: disable=invalid-name,g-bad-import-order,missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from absl import app
from absl import flags
from concurrent import futures
import gin
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Any, Dict, List, Optional, Tuple
from neutra import utils
tfd = tfp.distributions
tfb = tfp.bijectors
FLAGS = flags.FLAGS
TRAIN_BATCH = 250
TEST_BATCH = 1000
AIS_BATCH = 50
def ReduceL2(tensor, dims):
return tf.sqrt(tf.reduce_sum(tf.square(tensor), dims))
@utils.MakeTFTemplate
def Conv2DWN(inputs,
num_filters,
kernel_size=[3, 3],
stride=[1, 1],
pad="SAME",
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.shape[3])
with tf.variable_scope(scope, "conv_2d_wn"):
w = tf.get_variable(
"w", [kernel_size[0], kernel_size[1], num_inputs, num_filters],
initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, stride[0], stride[1], 1], pad)
if biases_initializer is not None:
out += tf.reshape(b, [1, 1, 1, num_filters])
return activation(out)
def GetLinearARMask(num_inputs, num_outputs, zero_diagonal=False):
assert num_inputs % num_outputs == 0 or num_outputs % num_inputs == 0, "%d vs %d" % (num_inputs, num_outputs)
mask = np.ones([num_inputs, num_outputs], dtype=np.float32)
if num_outputs >= num_inputs:
k = num_outputs // num_inputs
for i in range(num_inputs):
mask[i + 1:, i * k:(i + 1) * k] = 0
if zero_diagonal:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = num_inputs // num_outputs
for i in range(num_outputs):
mask[(i + 1) * k:, i:i + 1] = 0
if zero_diagonal:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
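# A minimal sketch of the mask above (illustrative only, not used elsewhere):
# in the square case with zero_diagonal=True, output j may depend only on
# inputs with index < j, so the [num_inputs, num_outputs] mask is strictly
# upper triangular.
def _linear_ar_mask_example():
  mask = GetLinearARMask(3, 3, zero_diagonal=True)
  expected = np.array([[0., 1., 1.],
                       [0., 0., 1.],
                       [0., 0., 0.]], dtype=np.float32)
  assert (mask == expected).all()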
def GetConvARMask(h, w, num_inputs, num_filters, zero_diagonal=False):
l = (h - 1) // 2
m = (w - 1) // 2
mask = np.ones([h, w, num_inputs, num_filters], dtype=np.float32)
mask[:l, :, :, :] = 0
mask[l, :m, :, :] = 0
mask[l, m, :, :] = GetLinearARMask(num_inputs, num_filters, zero_diagonal)
return mask
@utils.MakeTFTemplate
def Conv2DAR(inputs, num_filters,
kernel_size=[3, 3],
zero_diagonal=False,
weights_initializer=None,
biases_initializer=tf.zeros_initializer(),
scope=None):
num_inputs = int(inputs.get_shape()[3])
mask = GetConvARMask(kernel_size[0], kernel_size[1], num_inputs, num_filters, zero_diagonal)
w = tf.get_variable("w", [kernel_size[0], kernel_size[1], num_inputs, num_filters], initializer=weights_initializer)
b = tf.get_variable("b", [num_filters], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value() * mask, [0, 1, 2])))
g = tf.exp(g)
w = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(w * mask, [0, 1, 2])
out = tf.nn.conv2d(inputs, w, [1, 1, 1, 1], "SAME")
return out + tf.reshape(b, [1, 1, 1, num_filters])
@utils.MakeTFTemplate
def ConvAR(x,
h=None,
real_event_shape=[],
hidden_layers=[],
**kwargs):
#input_shape = (
# np.int32(x.shape.as_list())
# if x.shape.is_fully_defined() else tf.shape(x))
#x = tf.reshape(x, [-1] + real_event_shape)
for i, units in enumerate(hidden_layers):
x = Conv2DAR("conv2d_ar_%d"%i, num_filters=units, zero_diagonal=False, **kwargs)(inputs=x)
if i == 0 and h is not None:
if h.shape[-1] != x.shape[-1]:
x += Conv2DWN("conv2d_h", num_filters=int(x.shape[-1]), kernel_size=[1, 1], stride=[1, 1])(h)
else:
x += h
x = tf.nn.elu(x)
shift = Conv2DAR(
"conv2d_shift",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
log_scale = Conv2DAR(
"conv2d_scale",
num_filters=real_event_shape[-1],
zero_diagonal=True,
**kwargs)(
inputs=x)
#shift = tf.reshape(shift, input_shape)
#log_scale = tf.reshape(log_scale, input_shape)
return shift, log_scale
@utils.MakeTFTemplate
def DenseWN(inputs,
num_outputs,
activation=None,
weights_initializer=utils.L2HMCInitializer(),
biases_initializer=tf.zeros_initializer(),
scope=None):
if activation is None:
activation = lambda x: x
num_inputs = int(inputs.get_shape()[1])
with tf.variable_scope(scope, "dense_wn"):
w = tf.get_variable(
"w", [num_inputs, num_outputs], initializer=weights_initializer)
if biases_initializer is not None:
b = tf.get_variable("b", [num_outputs], initializer=biases_initializer)
g = tf.get_variable(
"g", initializer=tf.log(ReduceL2(w.initialized_value(), [0])))
g = tf.exp(g)
w = g * tf.nn.l2_normalize(w, [0])
out = tf.matmul(inputs, w)
if biases_initializer is not None:
out += tf.expand_dims(b, 0)
return activation(out)
@utils.MakeTFTemplate
def ResConv2D(inputs,
num_filters,
kernel_size,
stride,
activation=tf.nn.elu,
output_init_factor=1.0):
x = Conv2DWN(
"conv2d_in",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
activation=activation)(
inputs=inputs)
non_linear = Conv2DWN(
"conv2d_nl",
num_filters=num_filters,
kernel_size=kernel_size,
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=x)
skip = Conv2DWN(
"conv2d_skip",
num_filters=num_filters,
kernel_size=kernel_size,
stride=stride,
weights_initializer=utils.L2HMCInitializer(factor=output_init_factor))(
inputs=inputs)
return non_linear + skip
@utils.MakeTFTemplate
def ResDense(inputs, num_dims, activation=None):
x = DenseWN("dense_in", num_outputs=num_dims, activation=activation)(inputs)
non_linear = DenseWN("dense_nl", num_outputs=num_dims)(x)
skip = DenseWN("dense_skip", num_outputs=num_dims)(x)
return non_linear + skip
@gin.configurable("conv_hier_encoder")
@utils.MakeTFTemplate
def ConvHierEncoder(images, depth = 2, num_blocks = 2, z_dims = 32, h_dims=160):
x = Conv2DWN("conv2d_in", num_filters=h_dims, stride=[2, 2], kernel_size=[5, 5])(inputs=images - 0.5)
means = []
raw_scales = []
contexts = []
for i in range(depth):
for j in range(num_blocks):
downsample = i > 0 and j == 0
if downsample:
stride = [2, 2]
else:
stride = [1, 1]
h = tf.nn.elu(x)
h = Conv2DWN("conv2d_in_%d_%d"%(i, j), num_filters=2*z_dims + 2 * h_dims, stride=stride, kernel_size=[3, 3])(inputs=h)
mean, raw_scale, context, h = tf.split(h, [z_dims, z_dims, h_dims, h_dims], -1)
means.append(mean)
raw_scales.append(raw_scale)
contexts.append(context)
h = tf.nn.elu(h)
h = Conv2DWN("conv2d_h_%d_%d"%(i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
if downsample:
x = tf.image.resize_nearest_neighbor(x, [int(x.shape[1]) // 2, int(x.shape[2]) // 2])
x += 0.1 * h
return means, raw_scales, contexts
@gin.configurable("conv_hier_prior_post")
@utils.MakeTFTemplate
def ConvHierPriorPost(images=None,
encoder=None,
z=None,
batch=None,
depth = 2,
num_blocks = 2,
z_dims = 32,
h_dims = 160,
image_width = 32):
is_q = encoder is not None
if is_q:
means, raw_scales, up_contexts = encoder(images)
if batch is None:
if images is not None:
batch = tf.shape(images)[0]
else:
batch = tf.shape(z[0])[0]
h = tf.get_variable("h_top", [h_dims], initializer=tf.zeros_initializer())
h = tf.reshape(h, [1, 1, 1, -1])
top_width = image_width // 2 ** num_blocks
h = tf.tile(h, [batch, top_width, top_width, 1])
x = h
ret_z = []
ret_log_pz = []
for i in reversed(list(range(depth))):
for j in reversed(list(range(num_blocks))):
downsample = i > 0 and j == 0
h = tf.nn.elu(x)
h_p = Conv2DWN(
"conv2d_p_%d_%d" % (i, j),
num_filters=2 * h_dims + 2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
p_mean, p_raw_scale, down_context, h_det = tf.split(
h_p, [z_dims, z_dims, h_dims, h_dims], -1)
p_z = tfd.Independent(
tfd.Normal(loc=p_mean, scale=tf.nn.softplus(p_raw_scale)),
reinterpreted_batch_ndims=3)
if is_q:
h_q = Conv2DWN(
"conv2d_q_%d_%d" % (i, j),
num_filters=2 * z_dims,
stride=[1, 1],
kernel_size=[3, 3])(
inputs=h)
q_mean, q_raw_scale = tf.split(h_q, [z_dims, z_dims], -1)
context = down_context + up_contexts.pop()
q_mean += means.pop()
q_raw_scale += raw_scales.pop()
num_flat_dims = np.prod(q_mean.shape.as_list()[1:])
_maf_template = ConvAR(
"iaf_%d_%d" % (i, j),
real_event_shape=q_mean.shape.as_list()[1:],
hidden_layers=[h_dims, h_dims],
h=context,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
#x.set_shape([None, num_flat_dims])
x.set_shape([None] + q_mean.shape.as_list()[1:])
return t(x)
bijectors = []
#bijectors.append(tfb.Reshape(tf.shape(q_mean)[1:], [num_flat_dims]))
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
#bijectors.append(tfb.Reshape([num_flat_dims], tf.shape(q_mean)[1:]))
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
bijectors.append(tfb.AffineScalar(shift=q_mean, scale=tf.nn.softplus(q_raw_scale)))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(q_mean), scale=tf.ones_like(q_raw_scale)),
reinterpreted_batch_ndims=3)
q_z = tfd.TransformedDistribution(mvn, bijector)
if is_q:
dist = q_z
else:
dist = p_z
if z is None:
z_val = dist.sample()
else:
z_val = z[0]
z = z[1:]
ret_z.append(z_val)
ret_log_pz.append(dist.log_prob(z_val))
h = tf.concat([z_val, h_det], -1)
if downsample:
new_shape = [2 * int(x.shape[1]), 2 * int(x.shape[2])]
x = tf.image.resize_nearest_neighbor(x, new_shape)
h = tf.image.resize_nearest_neighbor(h, new_shape)
h = Conv2DWN("deconv2d_%d_%d" % (i, j), num_filters=h_dims, stride=[1, 1], kernel_size=[3, 3])(inputs=h)
x = x + 0.1 * h
x = tf.image.resize_nearest_neighbor(x, [2 * int(x.shape[1]), 2 * int(x.shape[2])])
x = Conv2DWN("conv2d_out", num_filters=3, stride=[1, 1], kernel_size=[5, 5])(inputs=x)
return ret_z, ret_log_pz, x
@gin.configurable("conv_encoder")
@utils.MakeTFTemplate
def ConvEncoder(images, num_outputs, hidden_dims = 450,
filter_scale = 1, fully_convolutional = False):
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 16, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return ResConv2D("res_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
else:
x = tf.reshape(x, [-1, filter_scale * 32 * 4 * 4])
x = ResDense("dense_h", num_dims=hidden_dims, activation=tf.nn.elu)(x)
return DenseWN(
"dense_out",
num_outputs=num_outputs,
weights_initializer=utils.L2HMCInitializer())(
x)
@gin.configurable("conv_decoder")
@utils.MakeTFTemplate
def ConvDecoder(encoding,
output_shape,
filter_scale = 1,
hidden_dims = 450,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
if fully_convolutional:
tf.logging.info("Encoding shape: %s", encoding.shape)
x = ResConv2D("res_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
else:
x = ResDense("dense_in", num_dims=hidden_dims, activation=tf.nn.elu)(encoding)
x = ResDense("dense_h", num_dims=filter_scale * 32 * 4 * 4, activation=tf.nn.elu)(x)
x = tf.reshape(x, [-1, 4, 4, filter_scale * 32])
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = ResConv2D("res_5", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=32 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = ResConv2D("res_3", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=16 * filter_scale, kernel_size=[3, 3], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = ResConv2D(
"res_1",
num_filters=output_shape[-1],
kernel_size=[3, 3],
stride=[1, 1],
output_init_factor=0.01)(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder2")
@utils.MakeTFTemplate
def ConvEncoder2(images, num_outputs, filter_scale = 1):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 16, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_5", num_filters=filter_scale * 32, kernel_size=[5, 5], stride=[2, 2], activation=tf.nn.elu)(x)
return ResConv2D("conv_out", num_filters=num_outputs, kernel_size=[3, 3], stride=[1, 1])(x)
@gin.configurable("conv_decoder2")
@utils.MakeTFTemplate
def ConvDecoder2(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = Conv2DWN("conv_in", num_filters=filter_scale * 32, kernel_size=[3, 3], stride=[1, 1])(encoding)
x = tf.image.resize_nearest_neighbor(x, [8, 8])
x = Conv2DWN("conv_5", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_4", num_filters=32 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
if output_shape[1] == 28:
# 8x8 -> 7x7
x = x[:, 1:, 1:, :]
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_3", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = Conv2DWN("conv_2", num_filters=16 * filter_scale, kernel_size=[5, 5], stride=[1, 1], activation=tf.nn.elu)(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN(
"conv_1",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder3")
@utils.MakeTFTemplate
def ConvEncoder3(images, num_outputs, hidden_dims = 450,
filter_scale = 1):
# This comes from VLAE paper.
x = images
x = ResConv2D("res_1", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_3", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_8", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
return Conv2DWN("conv_10", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
@gin.configurable("conv_decoder3")
@utils.MakeTFTemplate
def ConvDecoder3(encoding,
output_shape,
filter_scale = 1):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
x = Conv2DWN("conv_1", num_filters=filter_scale * 96, kernel_size=[1, 1], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_2", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_3", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_4", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_5", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_6", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_7", num_filters=filter_scale * 96, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_8", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_9", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = ResConv2D("res_10", num_filters=filter_scale * 48, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[5, 5],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("conv_encoder4")
@utils.MakeTFTemplate
def ConvEncoder4(images, num_outputs,
filter_scale = 1,
fully_convolutional = False):
x = images
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[2, 2])(x)
x = tf.nn.elu(x)
if fully_convolutional:
return Conv2DWN("conv_out", num_filters=num_outputs, kernel_size=[1, 1], stride=[1, 1])(x)
else:
return DenseWN("dense_out", num_outputs=num_outputs)(tf.layers.flatten(x))
@gin.configurable("conv_decoder4")
@utils.MakeTFTemplate
def ConvDecoder4(encoding,
output_shape,
filter_scale = 1,
fully_convolutional = False):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = encoding
if not fully_convolutional:
x = tf.reshape(DenseWN("dense_in", num_outputs=8*8*16)(x), [-1, 8, 8, 16])
x = tf.image.resize_nearest_neighbor(x, [output_shape[0] // 2, output_shape[1] // 2])
x = Conv2DWN("conv_1", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = tf.image.resize_nearest_neighbor(x, [output_shape[0], output_shape[1]])
x = Conv2DWN("conv_2", num_filters=filter_scale * 64, kernel_size=[5, 5], stride=[1, 1])(x)
x = tf.nn.elu(x)
x = Conv2DWN(
"conv_out",
num_filters=output_shape[-1],
kernel_size=[1, 1],
stride=[1, 1],
weights_initializer=utils.L2HMCInitializer(0.01))(
x)
return tf.reshape(x, [-1] + output_shape)
@gin.configurable("dense_encoder")
@utils.MakeTFTemplate
def DenseEncoder(images,
num_outputs,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
x = tf.layers.flatten(images)
# Center the data, assuming it goes from [0, 1] initially.
# x = 2.0 * x - 1.0
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
return tf.layers.dense(x, num_outputs, kernel_initializer=utils.L2HMCInitializer())
@gin.configurable("dense_decoder")
@utils.MakeTFTemplate
def DenseDecoder(encoding,
output_shape,
hidden_layer_sizes = [1024, 1024],
activation=tf.nn.elu):
if isinstance(encoding, (list, tuple)):
encoding = encoding[0]
x = tf.layers.flatten(encoding)
for size in hidden_layer_sizes:
x = tf.layers.dense(
x, size, activation=activation, kernel_initializer=utils.L2HMCInitializer())
num_outputs = np.prod(output_shape)
return tf.reshape(
tf.layers.dense(
x, num_outputs, kernel_initializer=utils.L2HMCInitializer(factor=0.01)),
[-1] + output_shape)
def IndependentBernouli3D(logits):
return tfd.Independent(
tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=3)
def IndependentDiscreteLogistic3D(locations,
scales):
dist = tfd.TransformedDistribution(
distribution=tfd.Logistic(loc=locations, scale=scales),
bijector=tfb.AffineScalar(scale=255.0))
dist = tfd.QuantizedDistribution(distribution=dist, low=0., high=255.0)
dist = tfd.Independent(dist, reinterpreted_batch_ndims=3)
class ScaleHack(object):
def __init__(self, dist):
self._dist = dist
def sample(self, *args, **kwargs):
return self._dist.sample(*args, **kwargs) / 255.0
def log_prob(self, x, *args, **kwargs):
return self._dist.log_prob(tf.clip_by_value(x * 255.0, 0.0, 255.0), *args, **kwargs)
return ScaleHack(dist)
def IndependentDiscreteLogistic3D2(locations,
scales):
class IndependentDiscreteLogistic(object):
def __init__(self, loc, scale):
self._loc = loc
self._scale = scale
def sample(self, *args, **kwargs):
dist = tfd.Logistic(loc=self._loc, scale=self._scale)
return tf.clip_by_value(dist.sample(*args, **kwargs), 0.0, 1.0)
def log_prob(self, x, *args, **kwargs):
sample = x
mean = self._loc
scales = self._scale
binsize=1.0 / 256.0
sample = (tf.floor(sample / binsize) * binsize - mean) / scales
return tf.reduce_sum(
tf.log(
tf.sigmoid(sample + binsize / scales) - tf.sigmoid(sample) + 1e-7),
[-1, -2, -3])
return IndependentDiscreteLogistic(locations, scales)
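# The log_prob above assigns each pixel the logistic probability mass of its
# 1/256-wide bin: with u = (floor(x / binsize) * binsize - mean) / scale, the
# per-element probability is sigmoid(u + binsize / scale) - sigmoid(u). A
# minimal NumPy sketch (illustrative only) of the same per-element quantity:
def _discretized_logistic_bin_prob(x, mean, scale, binsize=1.0 / 256.0):
  sigmoid = lambda t: 1.0 / (1.0 + np.exp(-t))
  u = (np.floor(x / binsize) * binsize - mean) / scale
  return sigmoid(u + binsize / scale) - sigmoid(u)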
@gin.configurable("dense_recognition")
@utils.MakeTFTemplate
def DenseRecognition(images, encoder, z=None, sigma_activation="exp"
):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
num_dims = int(encoding.shape[-1]) // 2
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, 2]), num=2, axis=-1)
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.Affine(shift=mu, scale_diag=sigma)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
tf.logging.info("bijector z shape: %s", z[0].shape)
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_affine")
@utils.MakeTFTemplate
def DenseRecognitionAffine(images, encoder, z=None,
z_dims=None):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
tril_raw = tfd.fill_triangular(encoding[:, z_dims:])
sigma = tf.nn.softplus(tf.matrix_diag_part(tril_raw))
tril = tf.linalg.set_diag(tril_raw, sigma)
bijector = tfb.Affine(shift=mu, scale_tril=tril)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_affine_lr")
@utils.MakeTFTemplate
def DenseRecognitionAffineLR(images, encoder, z=None,
z_dims=None, rank=1):
"""Models Q(z | encoder(x))"""
encoding = encoder(images)
mu = encoding[:, :z_dims]
sigma = encoding[:, z_dims:2*z_dims]
perturb = encoding[:, 2*z_dims:]
perturb = tf.reshape(perturb, [-1, z_dims, rank])
sigma = tf.nn.softplus(sigma)
bijector = tfb.Affine(shift=mu, scale_diag=sigma,
scale_perturb_factor=perturb)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_rnvp")
@utils.MakeTFTemplate
def DenseRecognitionRNVP(
images,
encoder,
z=None,
num_bijectors=3,
condition_bijector=False,
layer_sizes=[128, 128],
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_bijector:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_bijector:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_bijectors):
_rnvp_template = utils.DenseShiftLogScale(
"rnvp_%d" % i,
h=h,
hidden_layers=layer_sizes,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def rnvp_template(x, output_units, t=_rnvp_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims - output_units])
return t(x, output_units)
bijectors.append(
tfb.Invert(
tfb.RealNVP(
num_masked=num_dims // 2,
shift_and_log_scale_fn=rnvp_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("dense_recognition_iaf")
@utils.MakeTFTemplate
def DenseRecognitionIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="exp"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1, num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_iaf_layers):
#_maf_template = tfb.masked_autoregressive_default_template(
# hidden_layers=iaf_layer_sizes,
# activation=tf.nn.softplus,
# kernel_initializer=utils.L2HMCInitializer(factor=0.01))
_maf_template = utils.DenseAR(
"maf_%d" % i,
hidden_layers=iaf_layer_sizes,
h=h,
activation=tf.nn.softplus,
kernel_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None, num_dims])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.Affine(shift=mu, scale_diag=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(mu), scale_diag=tf.ones_like(sigma))
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
class FlipImageBijector(tfb.Bijector):
def __init__(self, validate_args=False, name=None):
"""Creates the `Permute` bijector.
Args:
permutation: An `int`-like vector-shaped `Tensor` representing the
permutation to apply to the rightmost dimension of the transformed
`Tensor`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object.
Raises:
TypeError: if `not permutation.dtype.is_integer`.
ValueError: if `permutation` does not contain exactly one of each of
`{0, 1, ..., d}`.
"""
super(FlipImageBijector, self).__init__(
forward_min_event_ndims=3,
is_constant_jacobian=True,
validate_args=validate_args,
name=name or "flip_image")
def _forward(self, x):
return tf.image.flip_left_right(tf.image.flip_up_down(x))
def _inverse(self, y):
return tf.image.flip_up_down(tf.image.flip_left_right(y))
def _inverse_log_det_jacobian(self, y):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
return tf.constant(0., dtype=y.dtype.base_dtype)
def _forward_log_det_jacobian(self, x):
return tf.constant(0., dtype=x.dtype.base_dtype)
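# Because a joint up-down and left-right flip is a 180-degree rotation, the
# bijector above is volume preserving (zero log-determinant) and self-inverse.
# A minimal NumPy-style sketch (illustrative only) of the same forward map on
# a [batch, height, width, channels] array:
def _flip_image_forward(x):
  return x[:, ::-1, ::-1, :]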
@gin.configurable("conv_iaf")
@utils.MakeTFTemplate
def ConvIAF(
images,
encoder,
z=None,
num_iaf_layers=2,
iaf_layer_sizes=[128, 128],
condition_iaf=False,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder. %s", encoding.shape)
if condition_iaf:
num_parts = 3
else:
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
if condition_iaf:
h = encoding_parts[2]
else:
h = None
bijectors = []
for i in range(num_iaf_layers):
_maf_template = ConvAR(
"iaf_%d" % i,
real_event_shape=encoding_parts[0].shape.as_list()[1:],
hidden_layers=iaf_layer_sizes,
h=h,
weights_initializer=utils.L2HMCInitializer(factor=0.01))
def maf_template(x, t=_maf_template):
# TODO: I don't understand why the shape gets lost.
x.set_shape([None] + encoding_parts[0].shape.as_list()[1:])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=maf_template)))
bijectors.append(FlipImageBijector())
# Drop the last swap.
bijectors = bijectors[:-1]
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijectors.append(tfb.AffineScalar(shift=mu, scale=sigma))
bijector = tfb.Chain(bijectors)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@gin.configurable("conv_shift_scale")
@utils.MakeTFTemplate
def ConvShiftScale(
images,
encoder,
z=None,
sigma_activation="softplus"):
"""Models Q(z | encoder(x)), z = f(w, encoder)"""
encoding = encoder(images)
if encoding.shape.ndims != 4:
raise ValueError("ConvIAF requires a convolutional encoder. %s", encoding.shape)
num_parts = 2
num_dims = int(encoding.shape[-1]) // num_parts
encoding_parts = tf.unstack(
tf.reshape(encoding, [-1] + encoding.shape.as_list()[1:-1] + [num_dims, num_parts]), num=num_parts, axis=-1)
# Do the shift/scale explicitly, so that we can use bijector to map the
# distribution to the standard normal, which is helpful for HMC.
mu = encoding_parts[0]
if sigma_activation == "exp":
sigma = tf.exp(0.5 * encoding_parts[1])
elif sigma_activation == "softplus":
sigma = tf.nn.softplus(encoding_parts[1])
bijector = tfb.AffineScalar(shift=mu, scale=sigma)
mvn = tfd.Independent(
tfd.Normal(loc=tf.zeros_like(mu), scale=tf.ones_like(sigma)),
reinterpreted_batch_ndims=3)
dist = tfd.TransformedDistribution(mvn, bijector)
if z is None:
z = [dist.sample()]
return z, [dist.log_prob(z[0])], [bijector] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def SimplePrior(z=None, batch=None,
num_dims=None):
"""Models P(z)"""
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros(num_dims), scale_diag=tf.ones(num_dims))
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def Simple3DPrior(z=None, batch=None,
shape=None):
"""Models P(z)"""
mvn = tfd.Independent(tfd.Normal(loc=tf.zeros(shape), scale=tf.ones(shape)), reinterpreted_batch_ndims=3)
if z is None:
z = [mvn.sample(batch)]
return z, [mvn.log_prob(z[0])] # pytype: disable=bad-return-type
@utils.MakeTFTemplate
def DenseMNISTNoise(x=None, z=None, decoder=None, return_means=True):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
bernoulli = IndependentBernouli3D(decoding)
if x is None:
if return_means:
x = bernoulli.mean()
else:
x = tf.to_float(bernoulli.sample())
return x, bernoulli.log_prob(x)
@gin.configurable("cifar10_noise")
@utils.MakeTFTemplate
def DenseCIFAR10TNoise(x=None, z=None, decoder=None, return_means=True, uniform_scale=False, logistic_impl="mine"):
"""Models P(x | decoder(z))"""
decoding = decoder(z)
if uniform_scale:
scale = tf.get_variable("scale", initializer=1.0)
scales = tf.reshape(scale, [1, 1, 1])
else:
scales = tf.get_variable(
"scales", [32, 32, 3], initializer=tf.ones_initializer())
if logistic_impl == "mine":
disc_logistic = IndependentDiscreteLogistic3D(decoding, tf.nn.softplus(scales))
elif logistic_impl == "kingma":
disc_logistic = IndependentDiscreteLogistic3D2(decoding, tf.nn.softplus(scales))
if x is None:
x = tf.to_float(disc_logistic.sample())
return x, disc_logistic.log_prob(x)
@gin.configurable("learning_rate")
def LearningRate(train_size, global_step, schedule = "hoffman", warmup_steps=0):
if schedule == "hoffman":
base = tf.train.piecewise_constant(
global_step, [train_size * 500 // TRAIN_BATCH], [1e-3, 1e-4])
elif schedule == "new":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[1e-3, 1e-4, 1e-5])
elif schedule == "new_gentle":
base = tf.train.piecewise_constant(
global_step,
[train_size * 500 // TRAIN_BATCH, train_size * 800 // TRAIN_BATCH],
[0.5e-3, 1e-4, 1e-5])
elif schedule == "fast":
base = tf.train.piecewise_constant(
global_step,
[train_size * 800 // TRAIN_BATCH],
[1e-2, 1e-5])
else:
raise ValueError("Invalid schedule: " + schedule)
if warmup_steps == 0:
return base
else:
return tf.minimum(base * tf.to_float(global_step) / warmup_steps, base)
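# A concrete reading of the "hoffman" schedule above: with TRAIN_BATCH = 250
# and, say, a 50000-image training set, the boundary falls at global step
# train_size * 500 // TRAIN_BATCH = 100000, so
#   lr(step) = 1e-3 for step <= 100000, and 1e-4 afterwards,
# optionally ramped in linearly from zero over `warmup_steps`.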
VAEOutputs = collections.namedtuple(
"VAEOutputs", "log_p_x_z, elbo, sample_means, recon_means, klqp, total_klqp, post_z, prior_z")
AISOutputs = collections.namedtuple(
"AISOutputs",
"log_p, p_accept, z_fin, recon"
)
def MakeVAE(images, recognition, prior, noise, beta, num_samples,
min_kl):
z, log_q_z = recognition(images)
_, log_p_z = prior(z)
_, log_p_x_z = noise(images, z)
post_z = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - beta * total_klqp
recon_means, _ = noise(None, z)
z, _ = prior(batch=num_samples)
sample_means, _ = noise(None, z)
return VAEOutputs(
log_p_x_z=log_p_x_z,
elbo=elbo,
sample_means=sample_means,
recon_means=recon_means,
klqp=klqp,
total_klqp=total_klqp,
post_z=post_z,
prior_z=z)
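# In symbols, MakeVAE maximizes a beta-scaled, KL-floored evidence lower bound
#   ELBO = E_q[log p(x | z)] - beta * sum_l max(min_kl, KL_l),
# where each per-layer KL_l is the single-sample estimate
#   KL_l ~= log q(z_l | x) - log p(z_l),
# and the max(min_kl, .) floor plays the role of "free bits".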
DLGMOutputs = collections.namedtuple(
"DLGMOutputs",
"elbo, sample_means, mcmc_log_p, recon_means, p_accept, post_z, post_z_chain, q_z, xentpq"
)
@gin.configurable("dlgm")
class DLGM(object):
def __init__(self,
z_dims=64,
beta=1.0,
beta_steps=0,
step_size=0.2,
num_leapfrog_steps=5,
num_hmc_steps=2,
use_neutra=True,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
q_loss_type="klqp",
min_kl=0.0,
symm_factor=0.5,
save_chain_state=False,
chain_warmup_epochs=5,
use_q_z_for_gen=False,
no_gen_train_steps=0,
dataset=None,
use_bijector_for_ais=False,
prior_type="simple",
adapt_step_size=False,
step_size_gain=1e-3,
use_q_z_for_ais=False,
affine_rank=1,
step_size_warmup=0):
self.train_size = dataset.train_size
self._use_q_z_for_ais = use_q_z_for_ais
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._use_bijector_for_ais = use_bijector_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
self._use_neutra = use_neutra
self._num_leapfrog_steps = num_leapfrog_steps
self._num_hmc_steps = num_hmc_steps
self._q_loss_type = q_loss_type
self._symm_factor = symm_factor
self._save_chain_state = save_chain_state
self._chain_warmup_epochs = chain_warmup_epochs
self._use_q_z_for_gen = use_q_z_for_gen
self._no_gen_train_steps = no_gen_train_steps
self._step_size_gain = step_size_gain
self._adapt_step_size = adapt_step_size
self._step_size_warmup = step_size_warmup
self._init_step_size = step_size
if self._adapt_step_size:
self._step_size = tf.get_variable("step_size", initializer=step_size)
else:
self._step_size = tf.constant(step_size)
if self._save_chain_state:
self._chain_state = tf.get_variable(
"train_chain_state", [self.train_size, z_dims], trainable=False)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
#assert dataset.name == "cifar10"
#self._encoder = ConvHierEncoder("encoder")
#self._prior_posterior = ConvHierPriorPost("prior_post")
#self._decoder = lambda z: self._prior_posterior(z=z)[2]
#self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
#self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
pass
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition(
"recog",
encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
self._recog = recog
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def AdjustedStepSize(self):
if self._step_size_warmup > 0:
global_step = tf.train.get_or_create_global_step()
max_step = self._init_step_size * tf.to_float(
global_step) / self._step_size_warmup
return tf.where(global_step > self._step_size_warmup, self._step_size,
tf.minimum(max_step, self._step_size))
else:
return self._step_size
def RecogVars(self):
return self._encoder.variables + self._recog.variables
def GenVars(self):
return (
self._prior.variables + self._decoder.variables + self._noise.variables)
def MakeDLGM(self,
images,
other_z_init=None,
use_other_z_init=None,
num_samples=64):
z, log_q_z, bijector = self._recog(images)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
post_z = z
q_z = z
    z_init = z
    if use_other_z_init is not None:
      # pick the stored chain state when use_other_z_init is True; note the
      # unpacking order must match zip(z, other_z_init)
      z_init = [tf.cond(use_other_z_init, lambda: tf.identity(other_layer_z),
                        lambda: tf.identity(layer_z))
                for layer_z, other_layer_z in zip(z, other_z_init)]
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(self._min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - self._beta * total_klqp
def TargetLogProbFn(*z):
for post_z_e, z_e in zip(post_z, z):
tf.logging.info("Shape here: %s %s", post_z_e.shape, z_e.shape)
z_e.set_shape(post_z_e.shape)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=TargetLogProbFn,
step_size=self.AdjustedStepSize(),
num_leapfrog_steps=self._num_leapfrog_steps)
if self._use_neutra:
kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=kernel, bijector=bijector)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=self._num_hmc_steps, current_state=z, kernel=kernel)
z = [tf.stop_gradient(s[-1, Ellipsis]) for s in states]
post_z = z
_, log_q_z, _ = self._recog(images, z=z)
xentpq = -tf.add_n([tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z])
if self._use_q_z_for_gen:
z = q_z
recon_means, _ = self._noise(None, z)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
mcmc_log_p = tf.reduce_mean(tf.add_n(log_p_z) + log_p_x_z)
if self._use_neutra:
log_accept_ratio = kernel_results.inner_results.log_accept_ratio
else:
log_accept_ratio = kernel_results.log_accept_ratio
p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))
z, _ = self._prior(batch=num_samples)
sample_means, _ = self._noise(None, z)
return DLGMOutputs(
elbo=elbo,
sample_means=sample_means,
mcmc_log_p=mcmc_log_p,
recon_means=recon_means,
p_accept=p_accept,
post_z=post_z,
post_z_chain=states,
q_z=z_init,
xentpq=xentpq)
def GetPosterior(self, images):
outputs = self.MakeDLGM(images)
return outputs.post_z
def TrainOp(self, data_idx, images):
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
if self._save_chain_state:
other_z_init = tf.gather(self._chain_state, data_idx)
use_other_z_init = (
global_step > self._chain_warmup_epochs * self.train_size // TRAIN_BATCH)
else:
other_z_init = None
use_other_z_init = None
outputs = self.MakeDLGM(
images, other_z_init=other_z_init, use_other_z_init=use_other_z_init)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
#gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
utils.LogAndSummarizeMetrics({
"learning_rate": learning_rate,
"elbo": outputs.elbo,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
"step_size": self.AdjustedStepSize(),
}, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
if self._save_chain_state:
with tf.control_dependencies([outputs.post_z]):
chain_state_update_op = tf.scatter_update(self._chain_state, data_idx,
outputs.post_z)
else:
chain_state_update_op = tf.no_op()
if self._adapt_step_size:
new_step_size = self._step_size + self._step_size_gain * (outputs.p_accept - 0.651)
new_step_size = tf.clip_by_value(new_step_size, 1e-3, 0.5)
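      # Robbins-Monro style control: 0.651 is the asymptotically optimal HMC
      # acceptance rate, so the step size is nudged up when the observed
      # acceptance exceeds the target and down otherwise, then clipped to a
      # safe range by the line above.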
step_size_op = self._step_size.assign(
tf.where(global_step > self._step_size_warmup, new_step_size,
self._step_size))
else:
step_size_op = tf.no_op()
with tf.name_scope("recog_train"):
if self._q_loss_type == "klqp":
loss = -outputs.elbo
elif self._q_loss_type == "symm":
loss = (
self._symm_factor * -outputs.elbo +
(1.0 - self._symm_factor) * outputs.xentpq)
elif self._q_loss_type == "klpq":
loss = outputs.xentpq
if self._save_chain_state:
# Not super efficient...
loss = tf.cond(use_other_z_init, lambda: tf.identity(loss),
lambda: tf.identity(-outputs.elbo))
recog_train_op = tf.contrib.training.create_train_op(
loss,
opt,
summarize_gradients=True,
variables_to_train=self.RecogVars(),
transform_grads_fn=utils.ProcessGradients)
with tf.name_scope("gen_train"):
gen_loss = tf.cond(global_step < self._no_gen_train_steps,
lambda: -outputs.elbo, lambda: -outputs.mcmc_log_p)
gen_train_op = tf.contrib.training.create_train_op(
gen_loss,
opt,
None,
summarize_gradients=True,
variables_to_train=self.GenVars(),
transform_grads_fn=utils.ProcessGradients)
return tf.group(recog_train_op, gen_train_op, chain_state_update_op, step_size_op)
def EvalOp(self, data_idx, images):
outputs = self.MakeDLGM(images)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
return utils.LogAndSummarizeMetrics({
"elbo": outputs.elbo,
"xentpq": outputs.xentpq,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
})
def AIS(self, images, num_chains):
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z, _ = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
if self._use_bijector_for_ais:
_, _, bijector = self._recog(images)
else:
bijector = None
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init, bijector=bijector)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("vae")
class VAE(object):
def __init__(self,
z_dims=64,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
beta=1.0,
beta_steps=0,
min_kl=0,
use_q_z_for_ais=False,
dataset=None,
prior_type="simple",
affine_rank=1):
self.train_size = dataset.train_size
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._beta = beta
self._use_q_z_for_ais = use_q_z_for_ais
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
assert dataset.name == "cifar10"
self._encoder = ConvHierEncoder("encoder")
self._prior_posterior = ConvHierPriorPost("prior_post")
self._decoder = lambda z: self._prior_posterior(z=z)[2]
self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition("recog", encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
# Drop the bijector return.
self._recog = lambda *args, **kwargs: recog(*args, **kwargs)[:2]
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def MakeVAE(self, images, beta_override=None, num_samples=64):
if beta_override is not None:
beta = beta_override
else:
beta = self._beta
return MakeVAE(images, self._recog, self._prior, self._noise, beta,
num_samples, self._min_kl)
def TrainOp(self, data_idx, images):
outputs = self.MakeVAE(images)
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
metrics = {
"learning_rate": learning_rate,
"log_p_x_z": outputs.log_p_x_z,
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
"beta": self._beta,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
utils.LogAndSummarizeMetrics(metrics, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
return tf.contrib.training.create_train_op(
-outputs.elbo,
opt,
summarize_gradients=True,
transform_grads_fn=utils.ProcessGradients)
def GetPosterior(self, images):
outputs = self.MakeVAE(images)
return outputs.post_z
def EvalOp(self, data_idx, images):
outputs = self.MakeVAE(images, 1.0)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
metrics = {
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
return utils.LogAndSummarizeMetrics(metrics)
def AIS(self, images, num_chains):
outputs = self.MakeVAE(images)
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("train")
def Train(model, dataset, train_dir, master, epochs=600, polyak_averaging=0.0, warmstart_ckpt=""):
data_idx, images = dataset.TrainBatch(TRAIN_BATCH, epochs)
train_op = model.TrainOp(data_idx, images)
if polyak_averaging > 0.0:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=polyak_averaging)
with tf.control_dependencies([train_op]):
train_op = ema.apply()
utils.LogAndSaveHParams()
tf.Session.reset(master)
if warmstart_ckpt:
tf.init_from_checkpoint(warmstart_ckpt, {"/": "/"})
hooks = [
tf.train.StopAtStepHook(last_step=dataset.train_size * epochs //
TRAIN_BATCH),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), every_n_secs=60)
]
tf.contrib.training.train(
train_op,
logdir=train_dir,
master=master,
hooks=hooks,
save_checkpoint_secs=120,
save_summaries_steps=60)
def Eval(model, dataset, train_dir, eval_dir, master,
use_polyak_averaging=False, max_number_of_evaluations=None):
data_idx, images = dataset.TestBatch(TEST_BATCH)
eval_op = model.EvalOp(data_idx, images)
utils.LogAndSaveHParams()
tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(saver=saver)
tf.Session.reset(master)
hooks = [
# Just for logging.
tf.contrib.training.StopAfterNEvalsHook(dataset.test_size // TEST_BATCH),
tf.contrib.training.SummaryAtEndHook(eval_dir),
tf.train.LoggingTensorHook(utils.GetLoggingOutputs(), at_end=True)
]
tf.contrib.training.evaluate_repeatedly(
train_dir,
eval_ops=eval_op,
hooks=hooks,
# LOL...
eval_interval_secs=120,
max_number_of_evaluations=max_number_of_evaluations,
master=master,
scaffold=scaffold)
def AISEvalShard(shard, master, num_workers, num_chains, dataset, use_polyak_averaging, writer, train_dir, model_fn, batch):
tf.logging.info("Thread started")
model = model_fn()
tf.logging.info("Built model")
shard_idx = tf.placeholder(tf.int64, [])
tf.logging.info("built data")
data_iterator = dataset.AISIterator(batch, shard_idx, num_workers)
images, _ = data_iterator.get_next()
tf.logging.info("Built mA")
ais_outputs = model.AIS(images, num_chains)
log_p = ais_outputs.log_p
p_accept = ais_outputs.p_accept
tf.logging.info("Built mB")
  if shard == 0:
utils.LogAndSaveHParams()
summary_op = tf.summary.merge_all()
global_step = tf.train.get_or_create_global_step()
if use_polyak_averaging:
tf.logging.info("Using polyak averaging")
ema = tf.train.ExponentialMovingAverage(decay=0.99)
saver = tf.train.Saver(ema.variables_to_restore())
else:
saver = tf.train.Saver()
tf.logging.info("Built mC")
global_step_val = []
tf.logging.info("Starting shard %d, %s", shard, master)
#with tf.MonitoredSession(
# tf.train.ChiefSessionCreator(
# master=master,
# checkpoint_dir=train_dir)) as sess:
while True:
try:
tf.Session.reset(master)
with tf.Session(master) as sess:
all_log_p = np.zeros([0])
saver.restore(sess, tf.train.latest_checkpoint(train_dir))
sess.run(data_iterator.initializer, {shard_idx: shard})
try:
step_num = 0
while True:
fetch = {
"log_p": log_p,
"global_step": global_step,
"p_accept": p_accept
}
if shard == 0:
fetch["summary"] = summary_op
tf.logging.info("Shard %d step %d started.", shard, step_num)
fetch = sess.run(fetch)
tf.logging.info("Shard %d step %d done.", shard, step_num)
tf.logging.info("Shard %d log_p %.2f, p_accept: %.2f", shard,
np.mean(fetch["log_p"]),
np.mean(fetch["p_accept"]))
all_log_p = np.hstack([all_log_p, fetch["log_p"]])
if shard == 0 and step_num == 0:
global_step_val.append(fetch["global_step"])
writer.add_summary(fetch["summary"], global_step_val[0])
step_num += 1
except tf.errors.OutOfRangeError:
tf.logging.info("Shard %d done.", shard)
pass
return all_log_p
except tf.errors.AbortedError:
pass
def AISEval(model_fn, dataset, train_dir, eval_dir, worker_master_pattern,
num_workers, num_chains, use_polyak_averaging=False):
tf.reset_default_graph()
log_p_ph = tf.placeholder(tf.float32, [None])
log_p_summary = tf.summary.scalar("log_p", tf.reduce_mean(log_p_ph))
writer = tf.summary.FileWriter(eval_dir)
with futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
results = []
for shard in range(num_workers):
tf.logging.info("Submitting shard %d", shard)
master = worker_master_pattern.format(shard)
results.append(
executor.submit(AISEvalShard, shard, master, num_workers, num_chains,
dataset, use_polyak_averaging, writer, train_dir,
model_fn, AIS_BATCH))
all_log_p = np.zeros([0])
for result in results:
log_p = result.result()
all_log_p = np.hstack([all_log_p, log_p])
  log_p = np.mean(all_log_p)
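# The AIS evaluators above combine per-chain importance weights in log space
# as log_p = logsumexp(log_w, chains) - log(num_chains). A minimal NumPy
# sketch of that reduction, standalone and with made-up values:
def _ais_log_mean_exp_sketch():
  log_w = np.array([[-10.2, -9.8], [-10.5, -9.6], [-9.9, -10.1]])  # (chains, batch)
  m = log_w.max(axis=0)
  # numerically stable log-mean-exp == logsumexp(log_w, 0) - log(num_chains)
  return m + np.log(np.exp(log_w - m).mean(axis=0))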
import numpy as np
from mlfromscratch.solvers.gradient_descent import *
from mlfromscratch.helpers import helpers as hp
class Logistic_regression(object):
"""
Logistic regression model.
For multiclass problems, the one-vs-rest (ovr) approach is used.
Parameters
----------
    normalized: bool, default=False
        whether the features are already normalized;
        this determines whether or not an intercept term is needed.
threshold: float, default=0.5
probability threshold for binary classification problems.
learning_rate: float, default=0.01
gradient descent step size (range 0-1)
max_iter: int, default=1000
maximum number of iterations for solver.
abs_tol: float, default=1e-9
absolute convergence tolerance for solver.
        stops when: |cost_{n+1} - cost_{n}| < abs_tol
"""
def __init__(self,normalized=False, threshold=0.5, learning_rate=0.01,
max_iter=1000, abs_tol=1e-9):
        if not 0 <= learning_rate <= 1:
            raise ValueError("Learning rate must be between 0-1")
self.learning_rate = learning_rate
self.threshold = threshold
self.max_iter = max_iter
self.abs_tol = abs_tol
self.normalized = normalized
def fit(self, X, y):
'''
Estimate the model parameters using the specified method.
Parameters
----------
X: np.array
feature matrix (m x n)
y: np.array
response vector (m x 1)
'''
# format np.arrays for regression
X,y = hp.format_reg(X, y, normalized=self.normalized)
# apply a one-hot-encoder to the response variable
encoded = hp.one_hot_encoder(y)
# based on the output of one-hot-encoder, determine whether
# the one-vs-rest (ovr) approach is needed
self.coef = np.zeros((encoded.shape[1],X.shape[1]))
for c in range(encoded.shape[1]):
# solve each binary classification problem through gradient descent
y_c = encoded[:,c].reshape(-1,1)
coef_c = self._gd(X, y_c)
self.coef[c,:] = coef_c.flatten()
def predict(self, X):
'''
Return the predicted value.
Parameters
----------
X: np.array
feature matrix (m x n)
'''
# format np.arrays for regression
X = hp.format_reg(X, normalized=self.normalized)
# calculate the predicted probability of membership to each class
pred_prob = hp.sigmoid(X.dot(self.coef.T))
# assign each X_i to the class with its highest predicting probability
if pred_prob.shape[1] == 1:
# binary case
y_pred = np.where(pred_prob>self.threshold, 1, 0).flatten()
else:
# multi class case
y_pred = np.argmax(pred_prob, axis=1)
return y_pred
### Private methods ###
def _gd(self, X, y):
'''
Fit model using the gradient descent method.
Encode cost and gradient as functions.
        prediction:
            => 1/(1 + exp{-X @ coef})
cost:
=> (-y.T @ log{y_pred} - (1-y).T @ log{1 - y_pred})/m
gradient:
=> ((y_pred-y).T @ X)/m
Parameters
----------
X: np.array
feature matrix (m x n)
y: np.array
response vector (m x 1)
'''
# prediction function
def gd_predict(X, coef): return hp.sigmoid(X.dot(coef))
        # binary cross-entropy (log-loss) cost
def gd_cost(m, y, y_pred): return (
-y.T.dot(np.log(y_pred)) -
            (1-y).T.dot(np.log(1 - y_pred)))/m
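        # gradient of the log-loss, matching the docstring above
        def gd_gradient(m, X, y, y_pred): return (y_pred - y).T.dot(X).T/m
        # Minimal inline descent loop; the original delegates to the imported
        # gradient_descent solver, whose exact signature is not shown here,
        # so this sketch stands in for it using the update rule documented above.
        m = X.shape[0]
        coef = np.zeros((X.shape[1], 1))
        prev_cost = np.inf
        for _ in range(self.max_iter):
            y_pred = gd_predict(X, coef)
            cost = float(gd_cost(m, y, y_pred))
            coef = coef - self.learning_rate*gd_gradient(m, X, y, y_pred)
            if abs(prev_cost - cost) < self.abs_tol:
                break
            prev_cost = cost
        return coef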
import cv2
import numpy as np
import matplotlib.pyplot as plt
def shi_tomashi(image):
"""
    Use the Shi-Tomasi algorithm to detect corners
Args:
image: np.array
Returns:
corners: list
"""
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
corners = cv2.goodFeaturesToTrack(gray, 4, 0.01, 100)
    corners = np.int0(corners)
    return corners
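# Demo with a hypothetical image path ("page.jpg" is an assumption); convert
# to RGB because shi_tomashi converts RGB->GRAY internally:
if __name__ == "__main__":
    image = cv2.cvtColor(cv2.imread("page.jpg"), cv2.COLOR_BGR2RGB)
    print(shi_tomashi(image))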
#Animation of ensemble simulations for ElEvoHI
# Author: <NAME>, <NAME>, Austria
# twitter @chrisoutofspace, https://github.com/cmoestl
# November 2018
# This work is published under the MIT LICENSE (see bottom)
import numpy as np
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import cm
from scipy import stats
import scipy.io
import sunpy.time
import time
import pickle
import seaborn as sns
import math
####################################################### functions
#for reading catalogues
def getcat(filename):
print( 'reading CAT '+filename)
cat=scipy.io.readsav(filename)#, verbose='false')
print( 'done reading CAT')
return cat
def decode_array(bytearrin):
#for decoding the strings from the IDL .sav file to a list of python strings, not bytes
    #make a list of python strings with the same length as the input
bytearrout= ['' for x in range(len(bytearrin))]
    for i in range(0,len(bytearrin)):
bytearrout[i]=bytearrin[i].decode()
#has to be np array so to be used with numpy "where"
bytearrout=np.array(bytearrout)
return bytearrout
def time_to_num_cat(time_in):
#for time conversion from catalogue .sav to numerical time
#this for 1-minute data or lower time resolution
#for all catalogues
#time_in is the time in format: 2007-11-17T07:20:00 or 2007-11-17T07:20Z
#for times help see:
#http://docs.sunpy.org/en/latest/guide/time.html
#http://matplotlib.org/examples/pylab_examples/date_demo2.html
j=0
#time_str=np.empty(np.size(time_in),dtype='S19')
time_str= ['' for x in range(len(time_in))]
#=np.chararray(np.size(time_in),itemsize=19)
time_num=np.zeros(np.size(time_in))
for i in time_in:
#convert from bytes (output of scipy.readsav) to string
time_str[j]=time_in[j][0:16].decode()+':00'
        year=int(time_str[j][0:4])
#convert time to sunpy friendly time and to matplotlibdatetime
#only for valid times so 9999 in year is not converted
#pdb.set_trace()
if year < 2100:
time_num[j]=mdates.date2num(sunpy.time.parse_time(time_str[j]))
j=j+1
#the date format in matplotlib is e.g. 735202.67569444
#this is time in days since 0001-01-01 UTC, plus 1.
#return time_num which is already an array and convert the list of strings to an array
    return time_num, np.array(time_str)
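# Quick self-contained check of the date convention used above (note that
# matplotlib >= 3.3 switched its default epoch to 1970-01-01, so absolute
# values differ there; the comments above assume the old 0001-01-01 epoch):
def _datenum_sketch():
    import datetime
    # the fractional part encodes the time of day, e.g. .5 for noon
    return mdates.date2num(datetime.datetime(2014, 1, 3, 12, 0))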
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from .comodulogram import multiple_band_pass
from .utils.peak_finder import peak_finder
from .utils.validation import check_consistent_shape, check_array
from .utils.validation import check_is_fitted
from .utils.viz import add_colorbar, mpl_palette
class PeakLocking(object):
"""An object to compute time average and time-frequency averaged with
peak-locking, to analyze phase-amplitude coupling.
Parameters
----------
fs : float
Sampling frequency
low_fq : float
Filtering frequency (phase signal)
low_fq_width : float
Bandwidth of the band-pass filter (phase signal)
high_fq_range : array or list, shape (n_high, ), or 'auto'
List of filtering frequencies (amplitude signal)
If 'auto', it uses np.linspace(low_fq, fs / 2, 40).
high_fq_width : float or 'auto'
Bandwidth of the band-pass filter (amplitude signal)
If 'auto', it uses 2 * low_fq.
t_plot : float
Time to plot around the peaks (in second)
filter_method : in {'mne', 'pactools'}
Choose band pass filtering method (in multiple_band_pass)
'mne': with mne.filter.band_pass_filter
'pactools': with pactools.fir.BandPassFilter (default)
peak_or_trough: in {'peak', 'trough'}
Lock to the maximum (peak) of minimum (trough) of the slow
oscillation.
percentiles : list of float or string, shape (n_percentiles, )
Percentile to compute for the time representation.
It can also include 'mean', 'std' or 'ste'
(resp. mean, standard deviation or standard error).
"""
def __init__(self, fs, low_fq, low_fq_width=1.0, high_fq_range='auto',
high_fq_width='auto', t_plot=1.0, filter_method='pactools',
peak_or_trough='peak', percentiles=['std+', 'mean', 'std-']):
self.fs = fs
self.low_fq = low_fq
self.high_fq_range = high_fq_range
self.low_fq_width = low_fq_width
self.high_fq_width = high_fq_width
self.t_plot = t_plot
self.filter_method = filter_method
self.peak_or_trough = peak_or_trough
self.percentiles = percentiles
def fit(self, low_sig, high_sig=None, mask=None):
"""
Compute peak-locked time-averaged and time-frequency representations.
Parameters
----------
low_sig : array, shape (n_epochs, n_points)
Input data for the phase signal
high_sig : array or None, shape (n_epochs, n_points)
Input data for the amplitude signal.
If None, we use low_sig for both signals
mask : array or None, shape (n_epochs, n_points)
The locking is only evaluated where the mask is False.
Masking is done after filtering.
Attributes
----------
time_frequency_ : array, shape (n_high, n_window)
Time-frequency representation, averaged with peak-locking.
(n_window is the number of point in t_plot seconds)
time_average_ : array, shape (n_percentiles, n_window)
Time representation, averaged with peak-locking.
(n_window is the number of point in t_plot seconds)
"""
self.low_fq = np.atleast_1d(self.low_fq)
if self.high_fq_range == 'auto':
self.high_fq_range = np.linspace(self.low_fq[0], self.fs / 2.0, 40)
if self.high_fq_width == 'auto':
self.high_fq_width = 2 * self.low_fq[0]
self.high_fq_range = np.asarray(self.high_fq_range)
self.low_sig = check_array(low_sig)
self.high_sig = check_array(high_sig, accept_none=True)
if self.high_sig is None:
self.high_sig = self.low_sig
self.mask = check_array(mask, accept_none=True)
check_consistent_shape(self.low_sig, self.high_sig, self.mask)
# compute the slow oscillation
# 1, n_epochs, n_points = filtered_low.shape
filtered_low = multiple_band_pass(low_sig, self.fs, self.low_fq,
self.low_fq_width,
filter_method=self.filter_method)
self.filtered_low_ = filtered_low[0]
filtered_low_real = np.real(self.filtered_low_)
if False:
# find the peak in the filtered_low_real
extrema = 1 if self.peak_or_trough == 'peak' else -1
thresh = (filtered_low_real.max() - filtered_low_real.min()) / 10.
self.peak_loc, self.peak_mag = peak_finder_multi_epochs(
filtered_low_real, fs=self.fs, t_plot=self.t_plot,
mask=self.mask, thresh=thresh, extrema=extrema)
else:
# find the peak in the phase of filtered_low
phase = np.angle(self.filtered_low_)
if self.peak_or_trough == 'peak':
phase = (phase + 2 * np.pi) % (2 * np.pi)
self.peak_loc, _ = peak_finder_multi_epochs(
phase, fs=self.fs, t_plot=self.t_plot, mask=self.mask,
extrema=1)
self.peak_mag = filtered_low_real.ravel()[self.peak_loc]
# extract several signals with band-pass filters
# n_high, n_epochs, n_points = filtered_high.shape
self.filtered_high_ = multiple_band_pass(
self.high_sig, self.fs, self.high_fq_range, self.high_fq_width,
n_cycles=None, filter_method=self.filter_method)
# compute the peak locked time-frequency representation
time_frequency_ = peak_locked_time_frequency(
self.filtered_high_, self.fs, self.high_fq_range,
peak_loc=self.peak_loc, t_plot=self.t_plot, mask=self.mask)
# compute the peak locked time representation
# we don't need the mask here, since only the valid peak locations are
# kept in peak_finder_multi_epochs
time_average_ = peak_locked_percentile(self.low_sig[None, :], self.fs,
self.peak_loc, self.t_plot,
self.percentiles)
time_average_ = time_average_[0, :, :]
self.time_frequency_ = time_frequency_
self.time_average_ = time_average_
return self
def plot_peaks(self, ax=None):
check_is_fitted(self, 'filtered_low_')
# plot the filtered_low_real peaks
if not isinstance(ax, matplotlib.axes.Axes):
fig = plt.figure(figsize=(16, 5))
ax = fig.gca()
n_point_plot = min(3000, self.low_sig.shape[1])
time = np.arange(n_point_plot) / float(self.fs)
filtered = np.real(self.filtered_low_[0, :n_point_plot])
ax.plot(time, self.low_sig[0, :n_point_plot], label='signal')
ax.plot(time, filtered, label='driver')
ax.plot(self.peak_loc[self.peak_loc < n_point_plot] / float(self.fs),
self.peak_mag[self.peak_loc < n_point_plot], 'o',
label='peaks')
ax.set_xlabel('Time (sec)')
ax.set_title("Driver's peak detection")
        ax.legend(loc=0)
def plot(self, axs=None, vmin=None, vmax=None, ylim=None):
"""
Returns
-------
fig : matplotlib.figure.Figure
Figure instance containing the plot.
"""
check_is_fitted(self, 'time_average_')
if axs is None:
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(8, 8))
axs = axs.ravel()
else:
fig = axs[0].figure
# plot the peak-locked time-frequency
ax = axs[0]
        n_high, n_points = self.time_frequency_.shape
vmax = np.abs(self.time_frequency_).max() if vmax is None else vmax
vmin = -vmax
extent = (
-self.t_plot / 2,
self.t_plot / 2,
self.high_fq_range[0],
self.high_fq_range[-1], )
cax = ax.imshow(self.time_frequency_, cmap=plt.get_cmap('RdBu_r'),
vmin=vmin, vmax=vmax, aspect='auto', origin='lower',
interpolation='none', extent=extent)
# ax.set_xlabel('Time (sec)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title('Driver peak-locked Time-frequency decomposition')
# plot the colorbar
plt.tight_layout()
fig.subplots_adjust(right=0.85)
add_colorbar(fig, cax, vmin, vmax, unit='', ax=None)
# plot the peak-locked time
ax = axs[1]
labels = {
'std+': r'$\mu+\sigma$',
'std-': r'$\mu-\sigma$',
'ste+': r'$\mu+\sigma/\sqrt{n}$',
'ste-': r'$\mu-\sigma/\sqrt{n}$',
'mean': r'$\mu$',
}
colors = mpl_palette('viridis', n_colors=len(self.percentiles))
n_percentiles, n_points = self.time_average_.shape
time = (np.arange(n_points) - n_points // 2) / float(self.fs)
for i, p in enumerate(self.percentiles):
label = ('%d %%' % p) if isinstance(p, int) else labels[p]
ax.plot(time, self.time_average_[i, :], color=colors[i],
label=label)
ax.set_xlabel('Time (sec)')
ax.set_title('Driver peak-locked average of raw signal')
ax.legend(loc='lower center', ncol=5, labelspacing=0.)
ax.grid('on')
# make room for the legend or apply specified ylim
if ylim is None:
ylim = ax.get_ylim()
ylim = (ylim[0] - (ylim[1] - ylim[0]) * 0.2, ylim[1])
ax.set_ylim(ylim)
return fig
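# A minimal usage sketch for the estimator above (parameter values are
# illustrative only; `sig` is a (n_epochs, n_points) array as in fit()):
def _peak_locking_sketch(sig, fs=250.):
    estimator = PeakLocking(fs=fs, low_fq=3.0, t_plot=1.0)
    estimator.fit(sig)
    estimator.plot_peaks()
    return estimator.plot()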
def peak_finder_multi_epochs(x0, fs=None, t_plot=None, mask=None, thresh=None,
extrema=1):
"""Call peak_finder for multiple epochs, and fill only one array
as if peak_finder was called with the ravelled array.
Also remove the peaks that are too close to the start or the end
of each epoch, and the peaks that are masked by the mask.
"""
n_epochs, n_points = x0.shape
peak_inds_list = []
peak_mags_list = []
for i_epoch in range(n_epochs):
peak_inds, peak_mags = peak_finder(x0[i_epoch], thresh=thresh,
extrema=extrema)
# remove the peaks too close to the start or the end
if t_plot is not None and fs is not None:
n_half_window = int(fs * t_plot / 2.)
selection = np.logical_and(peak_inds > n_half_window,
peak_inds < n_points - n_half_window)
peak_inds = peak_inds[selection]
peak_mags = peak_mags[selection]
# remove the masked peaks
if mask is not None:
selection = mask[i_epoch, peak_inds] == 0
peak_inds = peak_inds[selection]
peak_mags = peak_mags[selection]
peak_inds_list.extend(peak_inds + i_epoch * n_points)
peak_mags_list.extend(peak_mags)
if peak_inds_list == []:
        raise ValueError("No %s detected. The signal might be too short, "
                         "or the mask too strong. You can also try to reduce "
                         "the plotted time window `t_plot`." %
["trough", "peak"][(extrema + 1) // 2])
return np.array(peak_inds_list), np.array(peak_mags_list)
def peak_locked_time_frequency(filtered_high, fs, high_fq_range, peak_loc,
t_plot, mask=None):
"""
Compute the peak-locked Time-frequency
"""
# normalize each signal independently
n_high, n_epochs, n_points = filtered_high.shape
# normalization is done everywhere, but mean is computed
# only where mask == 1
if mask is not None:
masked_filtered_high = filtered_high[:, mask == 0]
else:
masked_filtered_high = filtered_high.reshape(n_high, -1)
mean = masked_filtered_high.mean(axis=1)[:, None, None]
std = masked_filtered_high.std(axis=1)[:, None, None]
filtered_high -= mean
filtered_high /= std
# get the power (np.abs(filtered_high) ** 2)
filtered_high *= np.conj(filtered_high)
filtered_high = np.real(filtered_high)
# subtract the mean power.
if mask is not None:
masked_filtered_high = filtered_high[:, mask == 0]
else:
masked_filtered_high = filtered_high.reshape(n_high, -1)
mean = masked_filtered_high.mean(axis=1)[:, None, None]
filtered_high -= mean
# compute the evoked signals (peak-locked mean)
evoked_signals = peak_locked_percentile(filtered_high, fs, peak_loc,
t_plot)
evoked_signals = evoked_signals[:, 0, :]
return evoked_signals
def peak_locked_percentile(signals, fs, peak_loc, t_plot,
percentiles=['mean']):
"""
Compute the mean of each signal in signals, locked to the peaks
Parameters
----------
signals : shape (n_signals, n_epochs, n_points)
fs : sampling frequency
peak_loc : indices of the peak locations
t_plot : in second, time to plot around the peaks
percentiles: list of precentile to compute. It can also include 'mean',
'std' or 'ste' (mean, standard deviation or standard error).
Returns
-------
evoked_signals : array, shape (n_signals, n_percentiles, n_window)
"""
n_signals, n_epochs, n_points = signals.shape
n_window = int(fs * t_plot / 2.) * 2 + 1
n_percentiles = len(percentiles)
    # build indices matrix: each row is an index range around one peak location
indices = np.tile(peak_loc, (n_window, 1)).T
indices = indices + np.arange(n_window) - n_window // 2
# ravel the epochs since we now have isolated events
signals = signals.reshape(n_signals, -1)
n_peaks = indices.shape[0]
assert n_peaks > 0
# compute the evoked signals (peak-locked mean)
evoked_signals = np.zeros((n_signals, n_percentiles, n_window))
for i_s in range(n_signals):
for i_p, p in enumerate(percentiles):
if isinstance(p, int):
evoked_signals[i_s, i_p] = np.percentile(signals[i_s][indices],
p, axis=0)
continue
mean = np.mean(signals[i_s][indices], axis=0)
if p == 'mean':
evoked_signals[i_s, i_p] = mean
else:
std = np.std(signals[i_s][indices], axis=0)
if p == 'std+':
evoked_signals[i_s, i_p] = mean + std
elif p == 'std-':
evoked_signals[i_s, i_p] = mean - std
elif p == 'ste+':
                    evoked_signals[i_s, i_p] = mean + std / np.sqrt(n_peaks)
                elif p == 'ste-':
                    evoked_signals[i_s, i_p] = mean - std / np.sqrt(n_peaks)
    return evoked_signals
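# The `indices` matrix in peak_locked_percentile is every peak location
# expanded into a symmetric window; a tiny standalone illustration:
def _window_indices_sketch():
    peak_loc = np.array([10, 40])
    n_window = 5
    indices = np.tile(peak_loc, (n_window, 1)).T
    indices = indices + np.arange(n_window) - n_window // 2
    return indices  # [[8 9 10 11 12], [38 39 40 41 42]], shape (n_peaks, n_window)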
#-----------------------------------------------------------------------------
# Name: RadiCALModels
# Purpose: To handle output from the radiCAL program written by <NAME>
# Author: <NAME>
# Created: 9/8/2016
# License: MIT License
#-----------------------------------------------------------------------------
""" Handles the data saved after running Radical to analyze data. The assumed
format is .mat V7.3, if saved in the older format, resave by setting preferences
in matlab Environment->Preferences->General->MAT-Files->V7.3. This stores the result
as an hd5 file with a .mat extension. Previous versions of matlab can be handled using
scipy.io.loadmat, but this module does not use this function
Examples
--------
#!python
>>rad=RadicalDataModel("radical_datafile")
>>rad.show()
Requirements
------------
+ [sys](https://docs.python.org/2/library/sys.html)
+ [os](https://docs.python.org/2/library/os.html?highlight=os#module-os)
+ [types](https://docs.python.org/2/library/types.html)
+ [pyMez](https://github.com/aricsanders/pyMez)
+ [h5py][http://www.h5py.org/]
Help
---------------
<a href="./index.html">`pyMez.Code.DataHandlers`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples Home</a> |
<a href="../../../Reference_Index.html">Index</a>
</div>"""
#-----------------------------------------------------------------------------
# Standard Imports
import os
import sys
#-----------------------------------------------------------------------------
# Third Party Imports
# magic statement that injects the pyMez folder into sys.path
# This allows Code to be imported skipping pyMez/.__init__.py
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))
try:
from Code.DataHandlers.TouchstoneModels import *
except:
print("The module pyMez.Code.DataHandlers.TouchstoneModels was not found or had an error,"
"please put it on the python path")
raise
try:
import h5py
except:
print("The module h5py was not found or had an error,"
"please put it on the python path or resolve the error. (pip install h5py)")
raise
try:
import numpy as np
except:
print("The module numpy was not found or had an error,"
"please put it on the python path or resolve the error. (pip install numpy)")
raise
#-----------------------------------------------------------------------------
# Module Constants
#-----------------------------------------------------------------------------
# Module Functions
def radical_dataset_to_s2p(radical_data_set,frequency_list,**options):
"""Takes a radical data set that is of the form <HDF5 dataset "S1": shape (4, 512), type "|V16"> and outputs
an S2PV1 python model. Requires frequency_list=np.array(radical_data_file["RadiCalData/StatistiCalData/F"])[0].tolist()
to be passed"""
defaults={"frequency_selector":0,"frequency_column_name":"Frequency"}
s2p_options={}
for key,value in defaults.items():
s2p_options[key]=value
for key,value in options.items():
s2p_options[key]=value
input_data=np.array(radical_data_set)
sparameters=[]
for index,item in enumerate(frequency_list):
[S11,S21,S12,S22]=[complex(input_data[0][index][0],input_data[0][index][1]),
complex(input_data[1][index][0],input_data[1][index][1]),
complex(input_data[2][index][0],input_data[2][index][1]),
complex(input_data[3][index][0],input_data[3][index][1])]
new_row=[item,S11,S21,S12,S22]
sparameters.append(new_row)
new_s2p=S2PV1(None,sparameter_complex=sparameters,**s2p_options)
return new_s2p
def radical_frequency_to_frequency_list(radical_frequency, radical_data_file=None):
"""Takes either the string specifying the radical frequency location ("RadiCalData/StatistiCalData/F") and
radical data file
or the data set radical_data_file["RadiCalData/StatistiCalData/F"] and returns a python list of frequencies"""
try:
if type(radical_frequency) in StringTypes:
frequency_list = np.array(radical_data_file[radical_frequency])[0].tolist()
elif type(radical_frequency) in [h5py._hl.dataset.Dataset]:
frequency_list = np.array(radical_frequency)[0].tolist()
elif type(radical_frequency) in [h5py._hl.files.File]:
frequency_list = np.array(radical_frequency["RadiCalData/StatistiCalData/F"])[0].tolist()
except:
        print(("Could not change {0} to a python list".format(radical_frequency)))
        return None
    return frequency_list
def radical_error_boxes_to_eight_term_complex(radical_s1, radical_s2, radical_frequency_list, radical_data_file=None):
"""Takes two radical error boxes and a frequency_list (in python format run radical_frequency_to_frequency_list first)
and converts them into a python list structure
[[f,S1_11,S1_12,S1_21,S1_22,S2_11,S2_12,S2_21,S_22]] where each component of a matrix is a complex number.
This list is designed to be used as an input for correct_sparameters_eight_term"""
try:
        # first convert S1 to a numpy array; the dimensions are 4 x number of frequencies x 2
if type(radical_s1) in StringTypes:
s1_numpy_array = np.array(radical_data_file[radical_s1])
elif type(radical_s1) in [h5py._hl.dataset.Dataset]:
            s1_numpy_array = np.array(radical_s1)
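# h5py exposes MATLAB V7.3 complex arrays as a compound dtype; assuming the
# usual 'real'/'imag' field names, the per-element complex() construction used
# above can be replaced by vectorised field access (a sketch, not part of the
# original module):
def _compound_to_complex_sketch(s_struct):
    return np.asarray(s_struct['real']) + 1j*np.asarray(s_struct['imag'])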
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow import keras
#from keras import backend as K
import numpy as np
import time
import os
import matplotlib #.pyplot as plt
matplotlib.use('TkAgg') # this makes the figure take focus rather than the terminal
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import gym
from DQN.qmemory import Qmemory
class deepQ:
""" Object for deep Q learning, for solving openai gym environments
deep Q network can be used for Q learning, to find the Q function that maximses
the reward, and effectively therefore gives an optimal stragey for the game.
The method used here contains various elements of the deepQ algorithm, namely:
experience replay, double Q learning, online and target networks with strided updates
"""
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def __init__(self,game,HYPERPARAMS,PARAMS):
""" Initialize
Initialize the hyperparameters of the model, start the game environment,
setup the tensorflow graph, start a filenaming convention for results.
Args:
HYPERPARAMS:
a dictionary of hyperparameters:
* **ALPHA**: learning rate
* **GAMMA**: reward discount factor
* **EPSILON_H**: initial probability of random actions in training
* **EPSILON_L**: lowest probability of random actions in training
* **EPS_DECAY**: decay rate (units of frames) of epsilon (exp(-frame/EPS_DECAY))
* **EPI_START**: episode at which to begin training
* **N_FILTER**: Number of filters for initial convolutional layer
* **N_FC**: Number of hidden units in fully connected layer
* **N_memory**: Number of transitions to store
* **N_batch**: The mini-batch size
* **UPDATE_FREQ**: how many frames to train on between updates of target network
* **TERMINAL_POINTS**: count a single point loss as a terminal move (boolean)
* **LOSS_SCALE**: scale on Huber loss, for testing, keep as 2.0
PARAMS:
A dictionary of parameters of the model:
- **Nc**: number of frames in a single game state
- **OUTPUT_STEP**: How often (in episodes) to save output summaries
- **MAX_STEPS**: max number of frames allowed per episode
"""
self.env = gym.make(game)
self.HYPERPARAMS = HYPERPARAMS
self.PARAMS = PARAMS
self.N_action = self.env.action_space.n//2
        # ^ this is specific to Pong, because half the actions have the same
        # purpose as the other actions.
# e.g actions 2 and 4 both move the paddle up
# 0: nothing, 1: nothing, 2:up, 3:down, 4:up, 5:down
# use instead x= 0,1,2, with mapping action = x+1,
# then x=0: nothing, x=1:up, x=2:down
# so we reduce the action space by half (from 6 to 3)
# and consequently action from Q value will be argmax(Q)+1
# n.b Q is vector of length N_action (i.e now 6//2 = 3)
# use the environment and model params to find useful quantities
frame = self.env.reset()
frame = self.preprocess(frame)
self.PARAMS['N_x'] = np.size(frame,0)
self.PARAMS['N_y'] = np.size(frame,1)
o1 = int( ( (self.PARAMS['N_x']-8)/4 ) + 1 )
o2 = int( ( (o1-4)/2 ) + 1 )
o3 = int( ( (o2-3)/1) + 1)
self.PARAMS['N_squash'] = o3
tf.reset_default_graph()
self.graph = tf.get_default_graph() #tf.Graph()
alpha_txt = f"alpha_{HYPERPARAMS['ALPHA']:.2e}_"
upd_txt = f"updfreq_{HYPERPARAMS['UPDATE_FREQ']:d}_"
decay_txt = f"EPSDECAY_{HYPERPARAMS['EPS_DECAY']:.1f}_"
nfc_txt = f"NFC_{HYPERPARAMS['N_FC']:d}_"
nfilt_txt = f"Nfilter_{HYPERPARAMS['N_FILTER']:d}_"
mem_txt = f"mem_{HYPERPARAMS['N_memory']:d}_"
batch_txt = f"batch_{HYPERPARAMS['N_batch']:d}_"
term_txt = f"terminal_{HYPERPARAMS['TERMINAL_POINTS']:d}"
self.params_text = alpha_txt+upd_txt+decay_txt+nfc_txt+\
nfilt_txt+mem_txt+batch_txt+term_txt
print("\n==========================================================")
print("\n\n filename for saving : ",self.params_text)
print(" action space has size : ",self.N_action)
print(" using tensorflow version : ",tf.VERSION, "\n\n")
print("==========================================================\n")
return None
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def preprocess(self,frame):
""" Preprocess frame of game
Args:
frame: a frame of the game
Returns:
frame_out: the preprocessed frame
"""
frame_out = np.zeros((84,84),dtype=np.uint8)
# to black and white
tmp = np.mean(frame, axis=2)
# trim edges
tmp = tmp[28:-12, :]
# downsample
tmp = tmp[1:-1:2,::2]
frame_out[:,2:-2] = tmp.astype(np.uint8)
return frame_out
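    # Shape walk-through for the crop/downsample above, assuming the standard
    # 210x160x3 Atari frame: mean -> (210,160); [28:-12,:] -> (170,160);
    # [1:-1:2, ::2] -> (84,80); padded into the 84x84 output at columns 2:-2.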
def action2step(self,act):
""" Convert integer into game action
In order that Pong can have only 3 actions (nothing, up, down), rather
than the 6 (each action replicated) in the gym environment, use a
preprocessing for the actions.
Args:
act: integer representing an action
Returns:
step: an integer for the action, act, expected by the game
"""
step=act+1
return step
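    # e.g. action2step(0) -> 1 (no-op), action2step(1) -> 2 (up),
    # action2step(2) -> 3 (down), per the mapping described in __init__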
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
def Qnet(self,obs,call_type,trainme,reuseme):
""" Neural network to get Q for given state
Structure of the network is:
- convolutional layer (K=8,S=4) with N_FILTER filters
- convolutional layer (K=4,S=2) with 2*N_FILTER filters
- convolutional layer (K=3,S=1) with 2*N_FILTER filters
- Fully Connected layer with N_FC hidden units
        It takes an input observation (a state of the game) and returns the predicted
        value Q for each action. The position of the maximum within Q is the policy action.
Args:
obs: (tensor) set of observations to predict Q for: size: batch,(x,y..),frames
frames should be 4 to match deepmind paper.
call_type: 'online/' or 'target/' - which network to use
trainme: (bool) should the weights be trainable
reuseme: (bool) should the weights be reusable
Returns:
z_out: output of the Neural Net, which is the predicted Q for the observation
"""
with tf.variable_scope(call_type):
z = tf.reshape(obs, [-1,self.PARAMS['N_x'],self.PARAMS['N_y'],self.PARAMS['Nc']])
#print(z.shape)
with tf.variable_scope('conv_layer0',reuse=reuseme):
z_conv0 = tf.layers.Conv2D(filters = self.HYPERPARAMS['N_FILTER'],
kernel_size = (8,8),
strides = (4,4),
padding='valid',
activation=tf.nn.leaky_relu,
trainable=trainme,
kernel_initializer=tf.keras.initializers.he_normal())(z)
with tf.variable_scope('conv_layer1',reuse=reuseme):
z_conv1 = tf.layers.Conv2D(filters = 2*self.HYPERPARAMS['N_FILTER'],
kernel_size = (4,4),
strides = (2,2),
padding='valid',
activation=tf.nn.leaky_relu,
trainable=trainme,
kernel_initializer=tf.keras.initializers.he_normal())(z_conv0)
#z_conv1_flat = tf.reshape(z_conv1,[-1,self.PARAMS['N_squash']*self.PARAMS['N_squash']*(2*self.HYPERPARAMS['N_FILTER'])])
with tf.variable_scope('conv_layer2',reuse=reuseme):
z_conv2 = tf.layers.Conv2D(filters = 2*self.HYPERPARAMS['N_FILTER'],
kernel_size = (3,3),
strides = (1,1),
padding='valid',
activation=tf.nn.leaky_relu,
trainable=trainme,
kernel_initializer=tf.keras.initializers.he_normal())(z_conv1)
z_flat = tf.reshape(z_conv2,[-1,self.PARAMS['N_squash']*self.PARAMS['N_squash']*(2*self.HYPERPARAMS['N_FILTER'])])
with tf.variable_scope('FC_layer0',reuse=reuseme):
z_FC0 = tf.layers.Dense(units=self.HYPERPARAMS['N_FC'],activation=tf.nn.relu,trainable=trainme,kernel_initializer=tf.keras.initializers.he_normal())(z_flat)
with tf.variable_scope('layer_out',reuse=reuseme):
z_out = tf.layers.Dense(units=self.N_action,trainable=trainme,kernel_initializer=tf.keras.initializers.he_normal())(z_FC0)
return z_out
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
def update_layer(self,layer):
""" Update the weights/biases of target network
For stability, it is useful to actively train an online network, and
only periodically update a target network with the weights and biases of
        the online network. This method updates a given layer in the target
        network to be the same as the equivalent layer of the online network.
Args:
layer: (string) name of layer. e.g. 'layer0'
Returns:
upd_k: operator that updates the kernel of the layer
            upd_b: operator that updates the bias of the layer
"""
        with tf.name_scope('get_online_weights'):
with tf.variable_scope('online/' + layer,reuse=True):
k_online = tf.get_variable('kernel')
b_online = tf.get_variable('bias')
with tf.name_scope('get_target_weights'):
with tf.variable_scope('target/' + layer,reuse=True):
k_target = tf.get_variable('kernel')
b_target = tf.get_variable('bias')
with tf.name_scope('assign_new_target_weights'):
upd_k = tf.assign(k_target,k_online)
upd_b = tf.assign(b_target,b_online)
return upd_k,upd_b
#---------------------------------------------------------------------------
def make_graph(self):
""" Define the computational graph
Takes in the game states (before and after action), action, reward, and
whether terminal as placeholders. Uses these to compute Q values for
both online and target networks. Applies the double deep Q learning
algorithm, using self.Qnet as the neural network which predicts the
Q values for a given state.
Placeholders:
the following variables should be set with a feed_dict
- **phi_i\_**: state before action
- **phi_j\_**: state after action
- **a_i\_**: action taken
- **r_i\_**: reward for taking action
- **t_i\_**: terminal move signifier (0 if final, 1 otherwise)
Returns:
graph_vars:
dictionary of variables of graph which are useful:
- **graph_init**: graph initializer (global)
- **graph_local_init**: graph initializer (local)
- **Q_i\_**:Q values predicted by Qnet on phi_i (online net)
- **loss\_**: loss on batch,
- **train_op**: training tf op
- **update_target**: updates target network weights to online weights
- **merged**: op to merge summaries for tensorboard
- **phi_i\_**: placeholder phi_i\_
- **phi_j\_**: placeholder phi_j\_
- **a_i\_**: placeholder a_i\_,
- **r_i\_**: placeholder r_i\_,
- **t_i\_**: placeholder t_i\_,
- **saver**: tf saver for saving meta graph and variables
"""
with self.graph.as_default():
# placeholders for the states, actions, rewards, and whether terminal
# size is batch, (x, y,), stored frames(4)
phi_i_ = tf.placeholder(shape=[None,self.PARAMS['N_x'],self.PARAMS['N_y'],self.PARAMS['Nc']],dtype=tf.float32)
phi_j_ = tf.placeholder(shape=[None,self.PARAMS['N_x'],self.PARAMS['N_y'],self.PARAMS['Nc']],dtype=tf.float32)
a_i_ = tf.placeholder(shape=[None,1],dtype=tf.uint8)
r_i_ = tf.placeholder(shape=[None,1],dtype=tf.float32)
t_i_ = tf.placeholder(shape=[None,1],dtype=tf.float32)
# ------------------------------------------------------------------
with tf.name_scope('Q_i_online'):
Q_i_ = self.Qnet(phi_i_,'online',True,False)
#print("Q_i_ shape = ",Q_i_.shape)
with tf.name_scope('Value_function_i_online'):
# convert actions that were taken into onehot format
a_list = tf.reshape(tf.cast(a_i_,tf.int32),[-1])
print("a_list shape = ",a_list.shape)
a_onehot = tf.one_hot(a_list, self.N_action)
print(a_onehot.shape)
# now use the onehot format actions to select the Q_i's that are actually
# obtained by taking action a_i. n.b Qnet returns a value for Q for all actions
# but we only want to know Q for the action taken
V_i_tmp = tf.multiply(a_onehot,Q_i_)
print(V_i_tmp.shape)
V_i_ = tf.reduce_sum(V_i_tmp, axis=1)
print(V_i_.shape)
# ------------------------------------------------------------------
            # double Q-learning: select the action for state j with the online
            # network, but evaluate its value with the target network
# this is the same network as for Q_i_ - we set reuse=True
# (it is also trainable)
with tf.name_scope('Qj_online'):
Qj_online_ = self.Qnet(phi_j_,'online',True,True)
Qj_online_inds = tf.argmax(Qj_online_,axis=1)
Qj_onehot_inds = tf.one_hot(Qj_online_inds, self.N_action)
# ------------------------------------------------------------------
# this has reuse=False, make a new network - the target network
# it is not trainable. Instead we train the online network and
# set the weights/biases of the layers in the target network to be the
# same as those in the online network every so many games.
with tf.name_scope('Qj_target'):
Q_j_ = self.Qnet(phi_j_,'target',False,False)
# now only take values of Q (target) for state j, using action that
# the online network would predict
with tf.name_scope('value_function_j'):
V_j_ = tf.reduce_sum(tf.multiply(Qj_onehot_inds,Q_j_),axis=1)
# ------------------------------------------------------------------
# get the future discounted reward
with tf.name_scope('discounted_reward'):
y_ = tf.add( tf.squeeze(r_i_) , self.HYPERPARAMS['GAMMA']*tf.multiply(tf.squeeze(t_i_),tf.squeeze(V_j_)))
print("y shape = ",y_.shape)
print("r_i_ shape = ",tf.squeeze(r_i_).shape)
# difference between value function (future discounted) and the value
            # function on state i
with tf.name_scope('discount_take_value'):
x_ = tf.subtract( y_, V_i_ )
print("x_ shape = ",x_.shape)
# ------------------------------------------------------------------
# define the loss, create an optimizer op, and a training op
# use a Pseudo-Huber loss
with tf.name_scope('loss'):
loss_scale = self.HYPERPARAMS['LOSS_SCALE'] # how steep loss is for large values
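                # Pseudo-Huber: L(x) = c*(sqrt(1 + (x/c)^2) - 1) with
                # c = LOSS_SCALE; behaves like x^2/(2c) near zero and like
                # |x| for |x| >> c, matching the expression below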
loss_ = tf.reduce_mean( loss_scale*(tf.sqrt(1.0+(1.0/loss_scale)**2*tf.multiply(x_,x_)) - 1.0) )
with tf.name_scope('optimizer'):
optimizer = tf.train.RMSPropOptimizer(self.HYPERPARAMS['ALPHA'])
train_op = optimizer.minimize(loss_)
# ------------------------------------------------------------------
# update the parameters of the target network, by cloning those from
# online Q network. This will only be sess.run'ed every C steps
with tf.name_scope('target_updates'):
upd_c_k0,upd_c_b0 = self.update_layer('conv_layer0/conv2d')
upd_c_k1,upd_c_b1 = self.update_layer('conv_layer1/conv2d')
upd_c_k2,upd_c_b2 = self.update_layer('conv_layer2/conv2d')
upd_FC_k0,upd_FC_b0 = self.update_layer('FC_layer0/dense')
upd_k_out,upd_b_out = self.update_layer('layer_out/dense')
# group all of these update ops into a single op for updating the
# entire target network
update_target = tf.group(upd_c_k0, upd_c_b0, upd_c_k1, upd_c_b1,upd_c_k2, upd_c_b2, upd_FC_k0, upd_FC_b0, upd_k_out, upd_b_out)
# ------------------------------------------------------------------
# create some tenorboard outputs for real-time analysis
tf.summary.scalar('loss', tf.squeeze(loss_))
merged = tf.summary.merge_all()
# ------------------------------------------------------------------
graph_init = tf.global_variables_initializer()
graph_local_init = tf.local_variables_initializer()
saver = tf.train.Saver()
graph_vars = {'graph_init':graph_init,
'graph_local_init':graph_local_init,
'Q_i_':Q_i_,
'loss_':loss_,
'train_op':train_op,
'update_target':update_target,
'merged':merged,
'phi_i_':phi_i_,
'phi_j_':phi_j_,
'a_i_':a_i_,
'r_i_':r_i_,
't_i_':t_i_,
'saver':saver}
return graph_vars
def summary_hist(self,summary_,tag,data,bins):
""" Add Histogram to tensorboard summary
Args:
summary_: a tf summary object to add histogram to
tag: a name/tag for the histogram
data: The data to be plotted
bins: The number of bins for the histogram, or an array of bin edges
"""
npdata = np.asarray(data)
hist_vals, bin_edges = np.histogram(npdata,bins)
hist = tf.HistogramProto()
hist.min = np.min(npdata)
hist.max = np.max(npdata)
# bucket_limit holds the upper edge of each bucket, so drop the first edge
bin_edges = bin_edges[1:]
for b in bin_edges:
hist.bucket_limit.append(b)
for hv in hist_vals:
hist.bucket.append(hv)
summary_.value.add(tag=tag,histo=hist)
return None
#---------------------------------------------------------------------------
def train(self, N_episodes):
"""Train the DeepQ network
Args:
N_epsiodes: how many episodes to train over
Returns:
out_dict: A dictionary of how various things evolved during during:
- rewards': average reward per epsiode
- 'steps': average steps per epsiode
- 'maxQ': average max_Q epsiode
- 'minQ': average min_Q per epsiode
- 'losses': average loss per epsiode
- 'actions': average action per epsiode
- 'epsilon': average epsilon per epsiode
"""
# define the computational graph for training the Q network
graph_vars = self.make_graph()
# ----------------------------------------------------------------------
# ----------------- now use the graph above as the session -------------
with tf.Session(graph=self.graph) as sess:
sess.run(graph_vars['graph_init'])
sess.run(graph_vars['graph_local_init'])
print(tf.trainable_variables())
summary=tf.Summary()
writer = tf.summary.FileWriter('%s/%s' % ('./../data_summaries', self.params_text),sess.graph)
# ------------------------------------------------------------------
# arrays in which to store memory of states it has seen
# CREATE THREE MEMORY TYPES
# 'normal' memory - stores non-terminal moves
# 'wins' memory - stores only final (winning) moves
# 'losses' memory - stores only final (losing) moves
# the idea is to keep the ratio of winning/losing/'normal' moves used
# in training consistent
N_mem_normal = int(0.7*self.HYPERPARAMS['N_memory'])
N_mem_losses = int(0.15*self.HYPERPARAMS['N_memory'])
N_mem_wins = self.HYPERPARAMS['N_memory'] - N_mem_normal - N_mem_losses
memory_normal = Qmemory(N_mem_normal,self.PARAMS['N_x'],self.PARAMS['N_y'],self.PARAMS['Nc'])
memory_wins = Qmemory(N_mem_wins ,self.PARAMS['N_x'],self.PARAMS['N_y'],self.PARAMS['Nc'])
memory_losses = Qmemory(N_mem_losses,self.PARAMS['N_x'],self.PARAMS['N_y'],self.PARAMS['Nc'])
# also define how big each batch should be
N_batch_l = int(0.15*self.HYPERPARAMS['N_batch'])
N_batch_w = N_batch_l
N_batch_n = self.HYPERPARAMS['N_batch'] - N_batch_w - N_batch_l
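# For example, with N_batch = 32 this gives N_batch_l = N_batch_w = 4 and
# N_batch_n = 24, i.e. roughly a 75/12.5/12.5 split of normal/loss/win moves.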
print(N_batch_n,N_batch_l,N_batch_w)
# ------------------------------------------------------------------
# counter for number of steps taken
steps_count = 0
# initialise arrays for storing average values of quantities
reward_p_ep = np.zeros((int(N_episodes/self.PARAMS['OUTPUT_STEP']),))
steps_p_ep = np.zeros_like(reward_p_ep)
avQ_p_ep = np.zeros_like(reward_p_ep)
max_Q_p_ep = np.zeros_like(reward_p_ep)
min_Q_p_ep = np.zeros_like(reward_p_ep)
loss_p_ep = np.zeros_like(reward_p_ep)
av_action_p_ep = np.zeros_like(reward_p_ep)
Q_init_0_p_ep = np.zeros_like(reward_p_ep)
epsilon_ep = np.zeros_like(reward_p_ep)
#these just for testing
init_obs = self.env.reset()
init_obs = self.preprocess(init_obs)
init_phi = np.tile( init_obs[:,:,np.newaxis], (1,1,self.PARAMS['Nc']) )
out_count=0
# --------------- loop over games ----------------------------------
time_ep1 = time.time()
for epi in np.arange(N_episodes):
# reset the game, and initialise states
done=False
current_obs = self.env.reset()
current_obs = self.preprocess(current_obs)
current_phi = np.tile( current_obs[:,:,np.newaxis], (1,1,self.PARAMS['Nc']) )
new_obs = np.zeros_like(current_obs)
new_phi = np.zeros_like(current_phi)
# --------------------------------------------------------------
# define epsilon (chance to make a move based on policy vs random)
# for the first 'EPI_START' episodes only make random moves;
# after this, decay epsilon exponentially according to the total steps
# taken during training
if epi<self.HYPERPARAMS['EPI_START']:
eps_tmp = self.HYPERPARAMS['EPSILON_H']
else:
eps_tmp = self.HYPERPARAMS['EPSILON_L'] + (self.HYPERPARAMS['EPSILON_H'] - self.HYPERPARAMS['EPSILON_L'])*np.exp(-(steps_count*1.0)/self.HYPERPARAMS['EPS_DECAY'])
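# i.e. epsilon(steps) = EPSILON_L + (EPSILON_H - EPSILON_L)*exp(-steps/EPS_DECAY),
# which starts at EPSILON_H (fully random) and decays towards EPSILON_L.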
# --------------------------------------------------------------
# initialize counters
tot_reward = 0.0
steps_used = 0.0
maxQ = 0.0
# reset the lists to empty at the beginning of a new averaging period
if ((np.mod(epi,self.PARAMS['OUTPUT_STEP'])==1 and epi>1) or epi==0):
losses=[]
av_acts=[]
maxQs = []
minQs = []
steps_list=[]
reward_list = []
# ---------- LOOP over frames in a given episode ---------------
for i in np.arange(self.PARAMS['MAX_STEPS']):
# get action using the Q net, or at random
if np.random.uniform() < eps_tmp:
action = np.asarray(np.random.randint(self.N_action))
new_obs, reward, done, info = self.env.step(self.action2step(action))
else:
# feed data into the session graph to get Q as a numpy array
# only phi_i is actual data
# phi_j, a_i, r_i and t_i are just dummies, as they are not used here
tmp_feed_dict = {graph_vars['phi_i_']:current_phi[np.newaxis,:,:,:]/255.0,
graph_vars['phi_j_']:current_phi[np.newaxis,:,:,:]/255.0,
graph_vars['a_i_']:memory_normal.memory_a_i[:1,:],
graph_vars['r_i_']:memory_normal.memory_r_i[:1,:],
graph_vars['t_i_']:memory_normal.memory_terminal_i[:1,:]}
# use Q network graph to get Q_i, uses the online network
Q = np.squeeze(sess.run([graph_vars['Q_i_']],tmp_feed_dict))
# append the max and min Q to the lists (will be averaged later)
maxQs.append(np.amax(Q))
minQs.append(np.amin(Q))
# the action to be taken, is one that maximises Q
action = np.argmax(Q)
new_obs, reward, done, info = self.env.step(self.action2step(action))
av_acts.append(action)
# ----------------------------------------------------------
# preprocess the image
new_obs = self.preprocess(new_obs)
# phi is made of several observations/frames, so concatenate
# the current phi (all but its first frame) with the new observation;
# this becomes the new state containing 'Nc' frames
new_phi = np.concatenate((current_phi[:,:,1:],new_obs[:,:,np.newaxis]), axis=2)
# convert the boolean 'done' (which tells us if this move ended the
# game) to a float. We want it to be 0.0 on the final move - the
# opposite of the normal bool->float conversion - so use: (not done)
if not self.HYPERPARAMS['TERMINAL_POINTS']:
term_float = np.array(not done)
term_float = term_float.astype(np.float32)
else:
term_float = np.array(reward > -0.1).astype(np.float32)
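# In both cases term_float multiplies the bootstrap term in the target
# y = r + GAMMA * term_float * V_j, so a value of 0.0 truncates the
# return at this transition.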
tot_reward+=reward
# ----------------------------------------------------------
# WRITE new experience to MEMORY
# only do this once we are at least 'Nc' moves into the game:
# phi states must contain 'Nc' distinct frames, and at the start of a
# game they are initialised as the first frame repeated Nc times,
# which would be unrealistic - so those early states are not stored.
if i>=(self.PARAMS['Nc']-1):
if reward > 0.1:
#print("writing win, ",reward)
memory_wins.write(current_phi, new_phi, action, reward, term_float)
elif reward > -0.1:
#print("writing normal, ",reward)
memory_normal.write(current_phi, new_phi, action, reward, term_float)
else:
#print("writing loss, ",reward)
memory_losses.write(current_phi, new_phi, action, reward, term_float)
# ----------------------------------------------------------
# APPLY LEARNING UPDATES
# ----------------------------------------------------------
# take a batch of the experiences from memory
# only do this once the warm-up period is over, so the memories are
# big enough to fill a batch
if(epi>self.HYPERPARAMS['EPI_START']):
# sample a batch from each of the 'normal', 'losses' and 'wins'
# memories.
batch_n = memory_normal.get_batch(N_batch_n)
batch_l = memory_losses.get_batch(N_batch_l)
batch_w = memory_wins.get_batch(N_batch_w)
# combine the batches from the three memories into a single batch
# which represents 'normal', 'loss' and 'win' moves with a
# predetermined ratio.
phi_i_batch = np.concatenate((batch_n['phi_i'], batch_l['phi_i'], batch_w['phi_i']) , axis=0)/255.0
phi_j_batch = np.concatenate((batch_n['phi_j'], batch_l['phi_j'], batch_w['phi_j'] ) , axis=0)/255.0
a_i_batch = np.concatenate((batch_n['a_i'] , batch_l['a_i'], batch_w['a_i']) , axis=0)
r_i_batch = np.concatenate((batch_n['r_i'] , batch_l['r_i'], batch_w['r_i']) , axis=0)
t_i_batch = np.concatenate((batch_n['t_i'] , batch_l['t_i'], batch_w['t_i']) , axis=0)
feed_dict_batch = { graph_vars['phi_i_']:(phi_i_batch).astype(np.float32),
graph_vars['phi_j_']:(phi_j_batch).astype(np.float32),
graph_vars['r_i_']:r_i_batch,
graph_vars['a_i_']:a_i_batch,
graph_vars['t_i_']:t_i_batch}
# get the loss for this batch
loss0 = sess.run(graph_vars['loss_'],feed_dict=feed_dict_batch)
# append loss to be averaged later
losses.append(loss0)
# APPLY GRADIENT DESCENT for batch
# only perform if episode is > EPI_START
if(epi>self.HYPERPARAMS['EPI_START']):
graph_vars['train_op'].run(feed_dict=feed_dict_batch)
# ----------------------------------------------------------
# prepare for beginning a new game, update counters etc
# RESET what the current phi is for the next step
current_phi = 1.0*new_phi
# if we are in the training period, add one to the total number
# of steps taken over all episodes
if epi>self.HYPERPARAMS['EPI_START']:
steps_count+=1
# step counter for each episode
steps_used+=1.0
if (np.mod(steps_count,self.HYPERPARAMS['UPDATE_FREQ'])==0 and steps_count>0):
#update the layers by running the update ops...
sess.run(graph_vars['update_target'])
# stop playing this game, if the move just performed was terminal
if (done):
break
# --------------------------------------------------------------
# this episode has now been played - update the running quantities
steps_list.append(steps_used)
reward_list.append(tot_reward)
# if this game is a multiple of OUTPUT_STEP then average useful
# quantities over the last OUTPUT_STEP games and write them to the
# output arrays.
if (np.mod(epi+1,self.PARAMS['OUTPUT_STEP'])==0 and epi>0):
steps_p_ep[out_count] = np.sum(np.asarray(steps_list))/(1.0*(0.00001+len(steps_list)))
reward_p_ep[out_count] = np.sum(np.asarray(reward_list))/(1.0*(0.00001+len(reward_list)))
max_Q_p_ep[out_count] = np.sum(np.asarray(maxQs))/(1.0*(0.00001+len(maxQs)))
min_Q_p_ep[out_count] = np.sum(np.asarray(minQs))/(1.0*(0.00001+len(minQs)))
loss_p_ep[out_count] = sum(losses)/(1.0*(0.01+len(losses)))
av_action_p_ep[out_count] = sum(av_acts)/(1.0*(0.0001+len(av_acts)))
epsilon_ep[out_count] = eps_tmp
summarynew = tf.Summary(value=[tf.Summary.Value(tag='avg steps', simple_value=steps_p_ep[out_count])])
summarynew.value.add(tag='avg reward', simple_value=reward_p_ep[out_count])
summarynew.value.add(tag='avg max Q', simple_value=max_Q_p_ep[out_count])
summarynew.value.add(tag='avg min Q', simple_value=min_Q_p_ep[out_count])
summarynew.value.add(tag='avg loss', simple_value=loss_p_ep[out_count])
summarynew.value.add(tag='avg action',simple_value=av_action_p_ep[out_count])
summarynew.value.add(tag='epsilon', simple_value=eps_tmp)
self.summary_hist(summarynew,'score hist',reward_list,60)
self.summary_hist(summarynew,'steps hist',steps_list,100)
# ALSO: at the output points, run a validation check -
# play N_valid games with no random moves and record the average score
avg_valid_reward = 0.0
N_valid = 3
for j in np.arange(N_valid):
valid_reward = 0.0
current_obs = self.env.reset()
current_obs = self.preprocess(current_obs)
current_phi = np.tile( current_obs[:,:,np.newaxis], (1,1,self.PARAMS['Nc']) )
for i in np.arange(self.PARAMS['MAX_STEPS']):
# get action using the Q net
tmp_feed_dict = {graph_vars['phi_i_']:current_phi[np.newaxis,:,:,:]/255.0,
graph_vars['phi_j_']:current_phi[np.newaxis,:,:,:]/255.0,
graph_vars['a_i_']:memory_normal.memory_a_i[:1,:],
graph_vars['r_i_']:memory_normal.memory_r_i[:1,:],
graph_vars['t_i_']:memory_normal.memory_terminal_i[:1,:]}
# use Q network graph to get Q_i, uses the online network
Q = np.squeeze(sess.run([graph_vars['Q_i_']],tmp_feed_dict))
# the action to be taken, is one that maximises Q
action = np.argmax(Q)
new_obs, reward, done, info = self.env.step(self.action2step(action))
new_obs = self.preprocess(new_obs)
new_phi = np.concatenate((current_phi[:,:,1:],new_obs[:,:,np.newaxis]), axis=2)
current_phi = 1.0*new_phi
valid_reward+=reward
if (done):
break
avg_valid_reward+=valid_reward/float(N_valid)
summarynew.value.add(tag='avg validation reward', simple_value=avg_valid_reward)
#print("wirting summary")
writer.add_summary(summarynew, epi+1)
if epi<self.PARAMS['OUTPUT_STEP']:
print("getting meta data for loss (not update or train)")
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
sess.run(graph_vars['loss_'],feed_dict=tmp_feed_dict,options=run_options,run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'epi-%d'%epi)
writer.flush()
time_ep2 = time.time()
print("epsiode {a:d} --- avg/max steps = {b:.1f} / {maxsteps:.1f} --- avg/max/valid reward = {c:.1f} / {maxre:.1f} / {validre:.1f} --- epsilon = {d:.2f} --- time = {e:.2f} \n".format(a=epi+1,b=steps_p_ep[out_count],maxsteps=np.amax(np.asarray(steps_list)),c=reward_p_ep[out_count],maxre=np.amax( | np.asarray(reward_list) | numpy.asarray |
"""
Functions for creating the standard sets of matrices in the standard, Pauli, Gell-Mann, and qutrit bases
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import itertools as _itertools
import numbers as _numbers
import numpy as _np
import scipy.sparse as _sps
## Pauli basis matrices
sqrt2 = _np.sqrt(2)
id2x2 = _np.array([[1, 0], [0, 1]])
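# (The file presumably continues with the remaining 2x2 Pauli matrices; a
# minimal sketch of their conventional definitions:
#   sigmax = _np.array([[0, 1], [1, 0]])
#   sigmay = _np.array([[0, -1j], [1j, 0]])
#   sigmaz = _np.array([[1, 0], [0, -1]]) )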
import gym
import time
import numpy as np
from pprint import pprint
import torch
import torch.nn as nn
import torch.optim as optim
from .buffer import PPOBuffer
from .model import PPOLSTMActorCritic
from rltorch.utils.rl_logger import RLLogger
class PPOLSTMAgent:
def __init__(self, **kwargs):
print("\nPPO with config:")
pprint(kwargs)
self.seed = kwargs["seed"]
torch.manual_seed(self.seed)
np.random.seed(self.seed)
self.env_name = kwargs["env_name"]
self.env = gym.make(self.env_name)
self.num_actions = self.env.action_space.n
self.obs_dim = self.env.observation_space.shape
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device={self.device}")
self.logger = RLLogger(self.env_name, "ppolstm")
self.setup_logger()
# Hyper params
self.steps_per_epoch = kwargs["epoch_steps"]
self.epochs = kwargs["epochs"]
self.max_ep_len = kwargs["max_ep_len"]
self.clip_ratio = kwargs["clip_ratio"]
self.target_kl = kwargs["target_kl"]
self.train_actor_iters = kwargs["train_actor_iters"]
self.train_critic_iters = kwargs["train_critic_iters"]
self.model_save_freq = kwargs["model_save_freq"]
self.buffer = PPOBuffer(self.steps_per_epoch, self.obs_dim, kwargs["hidden_size"],
kwargs["gamma"], kwargs["lam"], self.device)
self.actor_critic = PPOLSTMActorCritic(self.obs_dim[0], kwargs["hidden_size"], self.num_actions)
self.actor_critic.to(self.device)
print("\nActorCritic:")
print(self.actor_critic)
self.actor_optimizer = optim.Adam(self.actor_critic.actor.parameters(), lr=kwargs["actor_lr"])
self.critic_optimizer = optim.Adam(self.actor_critic.critic.parameters(), lr=kwargs["critic_lr"])
self.critic_loss_fn = nn.MSELoss()
def setup_logger(self):
# adds headers of interest
self.logger.add_header("epoch")
self.logger.add_header("total_steps")
self.logger.add_header("avg_ep_return")
self.logger.add_header("min_ep_return")
self.logger.add_header("max_ep_return")
self.logger.add_header("avg_vals")
self.logger.add_header("min_vals")
self.logger.add_header("max_vals")
self.logger.add_header("avg_ep_len")
self.logger.add_header("actor_loss")
self.logger.add_header("actor_loss_delta")
self.logger.add_header("critic_loss")
self.logger.add_header("critic_loss_delta")
self.logger.add_header("kl")
self.logger.add_header("entropy")
self.logger.add_header("time")
def get_action(self, obs, hid):
return self.actor_critic.act(obs, hid)
def compute_actor_loss(self, data):
obs, act, adv, logp_old = data["obs"], data["act"], data["adv"], data["logp"]
hid = data["actor_hid"]
pi, logp, _ = self.actor_critic.actor.step(obs, act, hid)
logp = logp.squeeze()
ratio = torch.exp(logp - logp_old)
clipped_ratio = torch.clamp(ratio, 1-self.clip_ratio, 1+self.clip_ratio)
clip_adv = clipped_ratio * adv
actor_loss = -(torch.min(ratio * adv, clip_adv)).mean()
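# This is the PPO-Clip surrogate objective,
#   L = -E[ min(r_t(theta) * A_t, clip(r_t(theta), 1-eps, 1+eps) * A_t) ],
# where r_t(theta) = pi_theta(a|s) / pi_theta_old(a|s); clipping removes
# the incentive to move the probability ratio outside [1-eps, 1+eps].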
actor_loss_info = dict()
actor_loss_info["kl"] = (logp_old - logp).mean().item()
actor_loss_info["entropy"] = pi.entropy().mean().item()
return actor_loss, actor_loss_info
def compute_critic_loss(self, data):
obs, ret, hid = data["obs"], data["ret"], data["critic_hid"]
predicted_val, _ = self.actor_critic.critic.forward(obs, hid)
predicted_val = predicted_val.squeeze()
return self.critic_loss_fn(predicted_val, ret)
def optimize(self):
data = self.buffer.get()
actor_loss_start, actor_loss_info_start = self.compute_actor_loss(data)
actor_loss_start = actor_loss_start.item()
critic_loss_start = self.compute_critic_loss(data).item()
for i in range(self.train_actor_iters):
self.actor_optimizer.zero_grad()
actor_loss, actor_loss_info = self.compute_actor_loss(data)
if actor_loss_info["kl"] > 1.5*self.target_kl:
break
actor_loss.backward()
self.actor_optimizer.step()
for i in range(self.train_critic_iters):
self.critic_optimizer.zero_grad()
critic_loss = self.compute_critic_loss(data)
critic_loss.backward()
self.critic_optimizer.step()
# calculate changes in loss, for logging
actor_loss_delta = (actor_loss.item() - actor_loss_start)
critic_loss_delta = (critic_loss.item() - critic_loss_start)
self.logger.log("actor_loss", actor_loss_start)
self.logger.log("actor_loss_delta", actor_loss_delta)
self.logger.log("critic_loss", critic_loss_start)
self.logger.log("critic_loss_delta", critic_loss_delta)
self.logger.log("kl", actor_loss_info_start["kl"])
self.logger.log("entropy", actor_loss_info_start["entropy"])
def step(self, obs, actor_hid, critic_hid):
return self.actor_critic.step(self.process_single_obs(obs), actor_hid, critic_hid)
def get_value(self, obs, critic_hid):
return self.actor_critic.get_value(self.process_single_obs(obs), critic_hid)
def process_single_obs(self, obs):
proc_obs = torch.from_numpy(obs).float().to(self.device)
proc_obs = proc_obs.view(1, 1, -1)
return proc_obs
def train(self):
print("PPO Starting training")
start_time = time.time()
for epoch in range(self.epochs):
self.logger.log("epoch", epoch)
o = self.env.reset()
epoch_ep_rets = []
epoch_ep_lens = []
ep_ret, ep_len = 0, 0
epoch_vals = []
actor_hid, critic_hid = self.actor_critic.get_init_hidden()
for t in range(self.steps_per_epoch):
a, v, logp, next_actor_hid, next_critic_hid = self.step(o, actor_hid, critic_hid)
next_o, r, d, _ = self.env.step(a.squeeze())
ep_len += 1
ep_ret += r
epoch_vals.append(v)
self.buffer.store(o, a, r, v, logp, actor_hid, critic_hid)
o = next_o
actor_hid = next_actor_hid
critic_hid = next_critic_hid
timeout = ep_len == self.max_ep_len
terminal = timeout or d
epoch_ended = t == self.steps_per_epoch-1
if terminal or epoch_ended:
v = 0
if timeout or epoch_ended:
v, next_critic_hid = self.get_value(o, critic_hid)
self.buffer.finish_path(v)
if terminal:
epoch_ep_rets.append(ep_ret)
epoch_ep_lens.append(ep_len)
ep_ret, ep_len = 0, 0
o = self.env.reset()
actor_hid, critic_hid = self.actor_critic.get_init_hidden()
# update the model
self.optimize()
# save model
if (epoch+1) % self.model_save_freq == 0:
print(f"Epoch {epoch+1}: saving model")
save_path = self.logger.get_save_path("pth")
self.actor_critic.save_AC(save_path)
self.logger.log("total_steps", (epoch+1)*self.steps_per_epoch)
self.logger.log("avg_ep_return", np.mean(epoch_ep_rets))
self.logger.log("min_ep_return", np.min(epoch_ep_rets))
self.logger.log("max_ep_return", | np.max(epoch_ep_rets) | numpy.max |
"""
@author: <NAME>
Plots the Pearson correlation coefficient, Spearman correlation coefficient,
Distance correlation coefficient, Chatterjee's correlation coefficient
and Wasserstein correlation coefficient between (X_1, f(X_2))
for the bivariate uniform distribution (X_1, X_2) as a function of the
correlation rho for different functions f(x)
"""
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from scipy.stats import pearsonr,spearmanr
from scipy.spatial.distance import pdist, squareform
from xicor.xicor import Xi
import ot
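# distcorr below computes the sample distance correlation of Szekely et al.:
#   dCor(X, Y) = dCov(X, Y) / sqrt(dVar(X) * dVar(Y)),
# where dCov is built from the doubly-centred pairwise-distance matrices
# A and B; the population version is zero iff X and Y are independent.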
def distcorr(X, Y):
X = np.atleast_1d(X)
Y = np.atleast_1d(Y)
if np.prod(X.shape) == len(X):
X = X[:, None]
if np.prod(Y.shape) == len(Y):
Y = Y[:, None]
X = np.atleast_2d(X)
Y = np.atleast_2d(Y)
n = X.shape[0]
if Y.shape[0] != X.shape[0]:
raise ValueError('Number of samples must match')
a = squareform(pdist(X))
b = squareform(pdist(Y))
A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
dcov2_xy = (A * B).sum()/float(n * n)
dcov2_xx = (A * A).sum()/float(n * n)
dcov2_yy = (B * B).sum()/float(n * n)
dcor = np.sqrt(dcov2_xy)/np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
return dcor
def adapW1_eot(x,y,N):
x_new = N**(-1/3)*np.floor(N**(1/3)*x)
y_new = N**(-1/3)*np.floor(N**(1/3)*y)
x_val = np.array(list(Counter(x_new).keys()))
x_freq = np.array(list(Counter(x_new).values()))
W = np.zeros(len(x_val))
for i in range(0,len(x_val)):
aux = y_new[x_new==x_val[i]]
aux = aux.reshape((len(aux), 1))
c = np.abs(aux-y_new)
w1 = np.ones(len(aux))/len(aux)
w2 = np.ones(len(y))/len(y)
W[i] = ot.sinkhorn2(w1,w2,c,0.01)
c = np.abs(y_new.reshape((N,1))-y_new)
denom = c.sum()/N**2
return np.dot(W, x_freq)/(N*denom)
N = 1000 #no. of samples
M = 30 #no. of draws
lam = np.linspace(0,1, num = 100)
Wcor = np.zeros(len(lam))
pcor = np.zeros(len(lam))
scor = np.zeros(len(lam))
dcor = np.zeros(len(lam))
ccor = np.zeros(len(lam))
Wcor_aux = np.zeros(M)
pcor_aux = np.zeros(M)
scor_aux = np.zeros(M)
dcor_aux = np.zeros(M)
ccor_aux = np.zeros(M)
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.contrast.barten1999` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.contrast import (optical_MTF_Barten1999, pupil_diameter_Barten1999,
sigma_Barten1999, retinal_illuminance_Barten1999,
maximum_angular_size_Barten1999,
contrast_sensitivity_function_Barten1999)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestOpticalMTFBarten1999', 'TestPupilDiameterBarten1999',
'TestSigmaBarten1999', 'TestRetinalIlluminanceBarten1999',
'TestMaximumAngularSizeBarten1999',
'TestContrastSensitivityFunctionBarten1999'
]
class TestOpticalMTFBarten1999(unittest.TestCase):
"""
Defines :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition unit tests methods.
"""
def test_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition.
"""
np.testing.assert_almost_equal(
optical_MTF_Barten1999(4, 0.01), 0.968910791191297, decimal=7)
np.testing.assert_almost_equal(
optical_MTF_Barten1999(8, 0.01), 0.881323136669471, decimal=7)
np.testing.assert_almost_equal(
optical_MTF_Barten1999(4, 0.05), 0.454040738727245, decimal=7)
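# (These reference values correspond to Barten's optical MTF,
# M_opt(u) = exp(-2 * pi^2 * sigma^2 * u^2), with spatial frequency u in
# cycles/degree and blur sigma in degrees.)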
def test_n_dimensional_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition n-dimensional support.
"""
u = np.array([4, 8, 12])
sigma = np.array([0.01, 0.05, 0.1])
M_opt = optical_MTF_Barten1999(u, sigma)
u = np.tile(u, (6, 1))
sigma = np.tile(sigma, (6, 1))
M_opt = np.tile(M_opt, (6, 1))
np.testing.assert_almost_equal(
optical_MTF_Barten1999(u, sigma), M_opt, decimal=7)
u = np.reshape(u, (2, 3, 3))
sigma = np.reshape(sigma, (2, 3, 3))
M_opt = np.reshape(M_opt, (2, 3, 3))
np.testing.assert_almost_equal(
optical_MTF_Barten1999(u, sigma), M_opt, decimal=7)
@ignore_numpy_errors
def test_nan_optical_MTF_Barten1999(self):
"""
Tests :func:`colour.contrast.barten1999.optical_MTF_Barten1999`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
optical_MTF_Barten1999(np.array(case), np.array(case))
import pandas
import numpy
import sys
import unittest
import os
import tempfile
import random
import string
import json
import copy
import warnings
sys.path.append("..")
import nPYc
from generateTestDataset import generateTestDataset
from nPYc.enumerations import VariableType
class test_dataset_synthetic(unittest.TestCase):
"""
Test Dataset object functions with synthetic data
"""
def setUp(self):
# Load an empty object and populate it with synthetic data.
# Empty object
self.data = nPYc.Dataset()
validChars = string.ascii_letters + string.digits
# Function to generate random strings:
def randomword(length):
return ''.join(random.choice(validChars) for i in range(length))
# Randomly sized intensity data
self.name = randomword(10)
self.noFeat = numpy.random.randint(3,100)
self.noSamp = numpy.random.randint(3,100)
self.data._intensityData = numpy.random.rand(self.noSamp,self.noFeat)
self.data.sampleMetadata['Sample File Name'] = list(map(str, numpy.linspace(1, self.noSamp, num=self.noSamp, dtype=int)))
self.data.sampleMetadata['Sample Metadata'] = [randomword(10) for x in range(0, self.noSamp)]
self.data.featureMetadata['Feature Name'] = list(map(str, numpy.linspace(1, self.noFeat, num=self.noFeat, dtype=int)))
self.data.featureMetadata['Feature Metadata'] = [randomword(10) for x in range(0, self.noFeat)]
self.data.VariableType = VariableType.Discrete
def test_nofeatures(self):
self.assertEqual(self.data.noFeatures, self.noFeat)
def test_name(self):
self.data.name = self.name
self.assertEqual(self.data.name, self.name)
def test_name_raises(self):
with self.assertRaises(TypeError):
self.data.name = 5
def test_normalisation(self):
from nPYc.utilities import normalisation
with self.subTest(msg='Check initialised with a NullNormaliser'):
self.assertIsInstance(self.data.Normalisation, normalisation.NullNormaliser)
numpy.testing.assert_equal(self.data.intensityData, self.data._intensityData)
with self.subTest(msg='Check swap to TA normaliser'):
self.data.Normalisation = normalisation.TotalAreaNormaliser()
taNormaliser = normalisation.TotalAreaNormaliser()
numpy.testing.assert_array_equal(self.data.intensityData, taNormaliser.normalise(self.data._intensityData))
def test_normalisation_raises(self):
with self.assertRaises(TypeError):
self.data.Normalisation = 'Not a Normaliser'
def test_nosamples(self):
self.assertEqual(self.data.noSamples, self.noSamp)
def test__repr__(self):
pointer = id(self.data)
reprString = str(self.data)
testString = "<%s instance at %s, named %s, with %d samples, %d features>" % (nPYc.Dataset().__class__.__name__, pointer, nPYc.Dataset().__class__.__name__, self.noSamp, self.noFeat)
self.assertEqual(reprString, testString)
def test_initialisemasks(self):
self.data.initialiseMasks()
featureMask = numpy.squeeze(numpy.ones([self.noFeat, 1], dtype=bool))
sampleMask = numpy.squeeze(numpy.ones([self.noSamp, 1], dtype=bool))
with self.subTest(msg='Checking featureMask.'):
numpy.testing.assert_equal(self.data.featureMask, featureMask)
with self.subTest(msg='Checking sampleMask.'):
numpy.testing.assert_equal(self.data.sampleMask, sampleMask)
def test_applymasks(self):
# exclude feature 2, samples 1 and 3
featureMask = numpy.squeeze(numpy.ones([self.noFeat, 1], dtype=bool))
featureMask[1] = False
sampleMask = numpy.squeeze(numpy.ones([self.noSamp, 1], dtype=bool))
sampleMask[[0, 2]] = False
expectedDataset = copy.deepcopy(self.data)
expectedDataset.sampleMetadataExcluded = []
expectedDataset.intensityDataExcluded = []
expectedDataset.featureMetadataExcluded = []
expectedDataset.excludedFlag = []
expectedDataset.sampleMetadataExcluded.append(expectedDataset.sampleMetadata.loc[~sampleMask, :])
expectedDataset.intensityDataExcluded.append(expectedDataset.intensityData[~sampleMask, :])
expectedDataset.featureMetadataExcluded.append(expectedDataset.featureMetadata)
expectedDataset.excludedFlag.append('Samples')
expectedDataset.featureMetadataExcluded.append(expectedDataset.featureMetadata.loc[~featureMask, :])
expectedDataset.intensityDataExcluded.append(expectedDataset.intensityData[sampleMask, :][:, ~featureMask])
expectedDataset.sampleMetadataExcluded.append(expectedDataset.sampleMetadata.loc[sampleMask, :])
expectedDataset.sampleMetadataExcluded[1].reset_index(drop=True, inplace=True)
expectedDataset.excludedFlag.append('Features')
expectedDataset.intensityData = expectedDataset.intensityData[sampleMask, :][:, featureMask]
expectedDataset.sampleMetadata = expectedDataset.sampleMetadata.loc[sampleMask, :]
expectedDataset.sampleMetadata.reset_index(drop=True, inplace=True)
expectedDataset.featureMetadata = expectedDataset.featureMetadata.loc[featureMask, :]
expectedDataset.featureMetadata.reset_index(drop=True, inplace=True)
expectedDataset.initialiseMasks()
maskedDataset = copy.deepcopy(self.data)
maskedDataset.initialiseMasks()
maskedDataset.featureMask[1] = False
maskedDataset.sampleMask[[0, 2]] = False
maskedDataset.applyMasks()
with self.subTest(msg='Checking sampleMetadata'):
pandas.testing.assert_frame_equal(maskedDataset.sampleMetadata, expectedDataset.sampleMetadata)
with self.subTest(msg='Checking featureMetadata'):
pandas.testing.assert_frame_equal( maskedDataset.featureMetadata.reindex(sorted(maskedDataset.featureMetadata), axis=1), expectedDataset.featureMetadata.reindex(sorted(expectedDataset.featureMetadata), axis=1))
with self.subTest(msg='Checking intensityData'):
numpy.testing.assert_array_equal(maskedDataset.intensityData, expectedDataset.intensityData)
with self.subTest(msg='Checking sampleMetadataExcluded'):
pandas.testing.assert_frame_equal(expectedDataset.sampleMetadataExcluded[0], maskedDataset.sampleMetadataExcluded[0])
pandas.testing.assert_frame_equal(expectedDataset.sampleMetadataExcluded[1], maskedDataset.sampleMetadataExcluded[1])
with self.subTest(msg='Checking intensityMetadataExcluded'):
numpy.testing.assert_array_equal(expectedDataset.intensityDataExcluded[0], maskedDataset.intensityDataExcluded[0])
numpy.testing.assert_array_equal(expectedDataset.intensityDataExcluded[1], maskedDataset.intensityDataExcluded[1])
with self.subTest(msg='Checking featureMetadataExcluded'):
pandas.testing.assert_frame_equal(expectedDataset.featureMetadataExcluded[0], maskedDataset.featureMetadataExcluded[0])
pandas.testing.assert_frame_equal(expectedDataset.featureMetadataExcluded[1], maskedDataset.featureMetadataExcluded[1])
with self.subTest(msg='Checking excludedFlag'):
self.assertListEqual(expectedDataset.excludedFlag, maskedDataset.excludedFlag)
with self.subTest(msg='Checking featureMask'):
numpy.testing.assert_array_equal(expectedDataset.featureMask, maskedDataset.featureMask)
with self.subTest(msg='Checking sampleMask'):
numpy.testing.assert_array_equal(expectedDataset.sampleMask, maskedDataset.sampleMask)
def test_updateMasks_raises(self):
self.data.initialiseMasks()
with self.subTest(msg='Features not implemented'):
self.assertRaises(NotImplementedError, self.data.updateMasks, filterFeatures=True)
with self.subTest(msg='Sample Types'):
self.assertRaises(TypeError, self.data.updateMasks, sampleTypes=[1, 2, 4])
self.assertRaises(TypeError, self.data.updateMasks, sampleTypes='not a list')
with self.subTest(msg='Assay Roles'):
self.assertRaises(TypeError, self.data.updateMasks, assayRoles=[1, 2, 4])
self.assertRaises(TypeError, self.data.updateMasks, assayRoles='not a list')
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
dataset = nPYc.Dataset()
dataset.intensityData = numpy.zeros([18, 5],dtype=float)
dataset.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
dataset.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(withArtifactualFiltering=False, filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
with self.subTest(msg='No filtering'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
dataset.initialiseMasks()
dataset.updateMasks(withArtifactualFiltering=False, filterFeatures=False, filterSamples=False)
numpy.testing.assert_array_equal(expectedSampleMask, dataset.sampleMask)
def test_validateObject(self):
testDataset = copy.deepcopy(self.data)
# fake exclusions
testDataset.sampleMetadataExcluded = []
testDataset.intensityDataExcluded = []
testDataset.featureMetadataExcluded = []
testDataset.excludedFlag = []
testDataset.sampleMetadataExcluded.append(testDataset.sampleMetadata.loc[[0, 2], :])
testDataset.intensityDataExcluded.append(testDataset.intensityData[[0, 2], :])
testDataset.featureMetadataExcluded.append(testDataset.featureMetadata)
testDataset.excludedFlag.append('Samples')
testDataset.featureMetadataExcluded.append(testDataset.featureMetadata.loc[3, :])
testDataset.intensityDataExcluded.append(testDataset.intensityData[:, 3])
testDataset.sampleMetadataExcluded.append(testDataset.sampleMetadata)
testDataset.excludedFlag.append('Features')
with self.subTest(msg='validateObject successful on empty Dataset'):
goodDataset = nPYc.Dataset()
self.assertTrue(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True))
with self.subTest(msg='validateObject successful on basic dataset'):
goodDataset = copy.deepcopy(testDataset)
self.assertTrue(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True))
with self.subTest(msg='check raise warnings'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, False)
# check each warning
self.assertEqual(len(w), 2)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.featureMetadata'" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to Dataset:" in str(w[1].message)
with self.subTest(msg='check not raise warnings with raiseWarning=False'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
# check it generally worked
self.assertEqual(result, False)
# check each warning
self.assertEqual(len(w), 0)
with self.subTest(msg='if self.Attributes does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'Attributes')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes is not a dict'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes = 'not a dict'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Log\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['Log']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Log\'] is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['Log'] = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'dpi\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['dpi']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'dpi\'] is not an int'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['dpi'] = 'not an int'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'figureSize\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['figureSize']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'figureSize\'] is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['figureSize'] = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'figureSize\'] is not of length 2'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['figureSize'] = ['too short list']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'figureSize\'][0] is not an int or float'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['figureSize'][0] = 'not an int or float'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'figureSize\'][1] is not an int or float'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['figureSize'][1] = 'not an int or float'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'figureFormat\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['figureFormat']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'figureFormat\'] is not a str'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['figureFormat'] = 5.0
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'histBins\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['histBins']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'histBins\'] is not an int'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['histBins'] = 'not an int'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'noFiles\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['noFiles']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'noFiles\'] is not an int'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['noFiles'] = 'not an int'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'quantiles\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['quantiles']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'quantiles\'] is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['quantiles'] = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'quantiles\'] is not of length 2'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['quantiles'] = ['too short list']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'quantiles\'][0] is not an int or float'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['quantiles'][0] = 'not an int or float'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'quantiles\'][1] is not an int or float'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['quantiles'][1] = 'not an int or float'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'sampleMetadataNotExported\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['sampleMetadataNotExported']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'sampleMetadataNotExported\'] is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['sampleMetadataNotExported'] = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'featureMetadataNotExported\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['featureMetadataNotExported']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'featureMetadataNotExported\'] is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['featureMetadataNotExported'] = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'analyticalMeasurements\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['analyticalMeasurements']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'analyticalMeasurements\'] is not a dict'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['analyticalMeasurements'] = 'not a dict'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'excludeFromPlotting\'] does not exist'):
badDataset = copy.deepcopy(testDataset)
del badDataset.Attributes['excludeFromPlotting']
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'excludeFromPlotting\'] is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.Attributes['excludeFromPlotting'] = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.VariableType does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'VariableType')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._Normalisation does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, '_Normalisation')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._Normalisation is not Normaliser ABC'):
badDataset = copy.deepcopy(testDataset)
badDataset._Normalisation = 'not Normaliser ABC'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._name does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, '_name')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._name is not a str'):
badDataset = copy.deepcopy(testDataset)
badDataset._name = 5.
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._intensityData does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, '_intensityData')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._intensityData is not a numpy.ndarray'):
badDataset = copy.deepcopy(testDataset)
badDataset._intensityData = 5.
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.sampleMetadata does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'sampleMetadata')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata is not a pandas.DataFrame'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata = 5.
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Sample File Name column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Sample File Name'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have an AssayRole column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['AssayRole'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a SampleType column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['SampleType'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Dilution column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Batch column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Batch'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Correction Batch column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Correction Batch'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Run Order column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Run Order'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have an Acquired Time column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Acquired Time'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Sample Base Name column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Sample Base Name'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Sample ID column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Sample ID'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Exclusion Details column'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadata.drop(['Exclusion Details'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.featureMetadata does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'featureMetadata')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata is not a pandas.DataFrame'):
badDataset = copy.deepcopy(testDataset)
badDataset.featureMetadata = 5.
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a Feature Name column'):
badDataset = copy.deepcopy(testDataset)
badDataset.featureMetadata.drop(['Feature Name'], axis=1, inplace=True)
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'sampleMask')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask is not a numpy.ndarray'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMask = 'not an array'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask is not a bool array'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMask = numpy.array([[5.]])
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'featureMask')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask is not a numpy.ndarray'):
badDataset = copy.deepcopy(testDataset)
badDataset.featureMask = 'not an array'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask is not a bool array'):
badDataset = copy.deepcopy(testDataset)
badDataset.featureMask = numpy.array([[5.]])
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadataExcluded does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'sampleMetadataExcluded')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadataExcluded is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.sampleMetadataExcluded = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.intensityDataExcluded does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'intensityDataExcluded')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.intensityDataExcluded is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.intensityDataExcluded = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.intensityDataExcluded does not have the same number of exclusions as self.sampleMetadataExcluded'):
badDataset = copy.deepcopy(testDataset)
badDataset.intensityDataExcluded = [[1], [1], [1]]
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadataExcluded does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'featureMetadataExcluded')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadataExcluded is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.featureMetadataExcluded = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadataExcluded does not have the same number of exclusions as self.sampleMetadataExcluded'):
badDataset = copy.deepcopy(testDataset)
badDataset.featureMetadataExcluded = [[1], [1], [1]]
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.excludedFlag does not exist'):
badDataset = copy.deepcopy(testDataset)
delattr(badDataset, 'excludedFlag')
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.excludedFlag is not a list'):
badDataset = copy.deepcopy(testDataset)
badDataset.excludedFlag = 'not a list'
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.excludedFlag does not have the same number of exclusions as self.sampleMetadataExcluded'):
badDataset = copy.deepcopy(testDataset)
badDataset.excludedFlag = [[1], [1], [1]]
self.assertFalse(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False))
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
def test_exportDataset(self):
with tempfile.TemporaryDirectory() as tmpdirname:
tmpdirname = os.path.join(tmpdirname, 'testOutput')
projectName = 'tempFile'
self.data.name = projectName
self.data.exportDataset(tmpdirname, saveFormat='CSV', withExclusions=False, filterMetadata=False)
# Load exported data back in, cast types back to str
filePath = os.path.join(tmpdirname, projectName + '_intensityData.csv')
intensityData = numpy.loadtxt(filePath, dtype=float, delimiter=',')
filePath = os.path.join(tmpdirname, projectName + '_featureMetadata.csv')
featureMetadata = pandas.read_csv(filePath, index_col=0)
featureMetadata['Feature Name'] = featureMetadata['Feature Name'].astype(str)
filePath = os.path.join(tmpdirname, projectName + '_sampleMetadata.csv')
sampleMetadata = pandas.read_csv(filePath, index_col=0)
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(str)
numpy.testing.assert_array_almost_equal_nulp(self.data.intensityData, intensityData)
pandas.testing.assert_frame_equal(self.data.featureMetadata, featureMetadata, check_dtype=False)
pandas.testing.assert_frame_equal(self.data.sampleMetadata, sampleMetadata, check_dtype=False)
def test_exportcsv(self):
"""
Check that csvs as written match the data tables in memory.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
projectName = os.path.join(tmpdirname, 'tempFile')
self.data._exportCSV(projectName, escapeDelimiters=False)
# Load exported data back in, cast types back to str
filePath = projectName + '_intensityData.csv'
intensityData = numpy.loadtxt(filePath, dtype=float, delimiter=',')
filePath = projectName + '_featureMetadata.csv'
featureMetadata = pandas.read_csv(filePath, index_col=0)
featureMetadata['Feature Name'] = featureMetadata['Feature Name'].astype(str)
filePath = projectName + '_sampleMetadata.csv'
sampleMetadata = pandas.read_csv(filePath, index_col=0)
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(str)
numpy.testing.assert_array_almost_equal_nulp(self.data.intensityData, intensityData)
pandas.testing.assert_frame_equal(self.data.featureMetadata, featureMetadata, check_dtype=False)
pandas.testing.assert_frame_equal(self.data.sampleMetadata, sampleMetadata, check_dtype=False)
def test_exportunifiedcsv(self):
"""
Verify unified csvs are written correctly.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
projectName = os.path.join(tmpdirname, 'tempFile')
self.data._exportUnifiedCSV(projectName)
filePath = projectName + '_combinedData.csv'
savedData = pandas.read_csv(filePath)
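# Layout of the combined CSV (as reconstructed below): the first
# featureMetadata.shape[1] rows hold the transposed feature metadata, the
# first sampleMetadata.shape[1] + 1 columns hold the sample metadata (plus
# the unnamed index column), and the remaining block is the intensity matrix.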
intensityData = savedData.iloc[self.data.featureMetadata.shape[1]:, self.data.sampleMetadata.shape[1]+1:].apply(pandas.to_numeric)
# Extract feature metadata
featureMetadata = savedData.iloc[:self.data.featureMetadata.shape[1], self.data.sampleMetadata.shape[1]+1:].T
featureMetadata.columns = savedData.iloc[:self.data.featureMetadata.shape[1], 0]
featureMetadata.reset_index(drop=True, inplace=True)
featureMetadata.columns.name = None
# Extract sample metadata
sampleMetadata = savedData.iloc[self.data.featureMetadata.shape[1]:, :self.data.sampleMetadata.shape[1]+1]
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(int).astype(str)
sampleMetadata.drop('Unnamed: 0', axis=1, inplace=True)
sampleMetadata.reset_index(drop=True, inplace=True)
numpy.testing.assert_array_almost_equal(self.data.intensityData, intensityData)
pandas.testing.assert_frame_equal(self.data.featureMetadata, featureMetadata, check_dtype=False)
pandas.testing.assert_frame_equal(self.data.sampleMetadata, sampleMetadata, check_dtype=False)
def test_exportdataset_withexclusions(self):
"""
Test that csv files saved with exclusions match the dataset generated after exclusions are applied.
"""
self.data.initialiseMasks()
featureToExclude = numpy.random.randint(1, self.noFeat, size=numpy.random.randint(1, int(self.noFeat / 2) + 1))
sampleToExclude = numpy.random.randint(1, self.noSamp, size=numpy.random.randint(1, int(self.noSamp / 2) + 1))
self.data.featureMask[featureToExclude] = False
self.data.sampleMask[sampleToExclude] = False
with tempfile.TemporaryDirectory() as tmpdirname:
projectName = 'tempFile'
self.data.name = projectName
self.data.exportDataset(destinationPath=tmpdirname, saveFormat='CSV', withExclusions=True, filterMetadata=False)
# Load exported data back in, cast types back to str
filePath = os.path.join(tmpdirname, projectName + '_intensityData.csv')
intensityData = numpy.loadtxt(filePath, dtype=float, delimiter=',')
filePath = os.path.join(tmpdirname, projectName + '_featureMetadata.csv')
featureMetadata = pandas.read_csv(filePath)
featureMetadata.drop('Unnamed: 0', axis=1, inplace=True)
featureMetadata['Feature Name'] = featureMetadata['Feature Name'].astype(str)
filePath = os.path.join(tmpdirname, projectName + '_sampleMetadata.csv')
sampleMetadata = pandas.read_csv(filePath)
sampleMetadata.drop('Unnamed: 0', axis=1, inplace=True)
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(str)
# Loaded data and data after applyMasks() should match
self.data.applyMasks()
numpy.testing.assert_equal(self.data.intensityData, intensityData)
pandas.testing.assert_frame_equal(self.data.featureMetadata, featureMetadata, check_dtype=False)
pandas.testing.assert_frame_equal(self.data.sampleMetadata, sampleMetadata, check_dtype=False)
def test_exportdataset_filterMetadata(self):
"""
Test that csv files saved with `filterMetadata` match the dataset with these sampleMetadata and featureMetadata columns removed.
"""
featureMetaCols = list(set(self.data.featureMetadata.columns.tolist()) - set(['Feature Name']))
sampleMetaCols = list(set(self.data.sampleMetadata.columns.tolist()) - set(['Sample File Name']))
randomFeatCols = [1] #numpy.random.randint(0, len(featureMetaCols), size=numpy.random.randint(int(len(featureMetaCols) / 2) + 1)).tolist()
randomSampCols = numpy.random.randint(0, len(sampleMetaCols), size=numpy.random.randint(int(len(sampleMetaCols) / 3) + 1)).tolist()
self.data.Attributes['featureMetadataNotExported'] = [x for i, x in enumerate(featureMetaCols) if i in randomFeatCols] + ['not an existing featureMeta column']
self.data.Attributes['sampleMetadataNotExported'] = [x for i, x in enumerate(sampleMetaCols) if i in randomSampCols] + ['not an existing sampleMeta column']
with tempfile.TemporaryDirectory() as tmpdirname:
projectName = 'tempFile'
self.data.name = projectName
self.data.exportDataset(destinationPath=tmpdirname, saveFormat='UnifiedCSV', withExclusions=False, filterMetadata=True)
# Load exported data back in, cast types back to str
filePath = os.path.join(tmpdirname, projectName + '_combinedData.csv')
savedData = pandas.read_csv(filePath)
featureCols = self.data.featureMetadata.shape[1] - (len(self.data.Attributes['featureMetadataNotExported']) - 1)
sampleCols = self.data.sampleMetadata.shape[1] + 1 - (len(self.data.Attributes['sampleMetadataNotExported']) - 1)
intensityData = savedData.iloc[featureCols:, sampleCols:].apply(pandas.to_numeric)
# Extract feature metadata
featureMetadata = savedData.iloc[:featureCols, sampleCols:].T
featureMetadata.columns = savedData.iloc[:featureCols, 0]
featureMetadata.reset_index(drop=True, inplace=True)
featureMetadata.columns.name = None
# Extract sample metadata
sampleMetadata = savedData.iloc[featureCols:, :sampleCols]
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(int).astype(str)
sampleMetadata.drop('Unnamed: 0', axis=1, inplace=True)
sampleMetadata.reset_index(drop=True, inplace=True)
featureMetadata['Feature Name'] = featureMetadata['Feature Name'].astype(str)
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(str)
# Check we did not break the data we kept
numpy.testing.assert_array_almost_equal(self.data.intensityData, intensityData)
for column in featureMetadata.columns:
pandas.testing.assert_series_equal(self.data.featureMetadata[column], featureMetadata[column], check_dtype=False)
for column in sampleMetadata.columns:
pandas.testing.assert_series_equal(self.data.sampleMetadata[column], sampleMetadata[column], check_dtype=False)
# Check excluded columns are not in the new tables
for column in randomFeatCols:
self.assertFalse(column in featureMetadata.columns)
for column in randomSampCols:
self.assertFalse(column in sampleMetadata.columns)
def test_exportDataset_raises(self):
self.assertRaises(TypeError, self.data.exportDataset, destinationPath=1)
self.assertRaises(ValueError, self.data.exportDataset, saveFormat='Not known', withExclusions=False)
self.assertRaises(TypeError, self.data.exportDataset, withExclusions='no')
self.assertRaises(TypeError, self.data.exportDataset, filterMetadata='no')
def test_print_log(self):
"""
Verify logged items are printed correctly.
"""
from datetime import datetime
time1 = datetime.now()
time2 = datetime(2016, 1, 30, 12, 15)
time3 = datetime(2016, 10, 10, 13, 30)
str1 = 'Log entry 1'
str2 = 'Log entry 2'
str3 = 'Log entry 3'
self.data.Attributes['Log'].append([time1, str1])
self.data.Attributes['Log'].append([time2, str2])
self.data.Attributes['Log'].append([time3, str3])
# Expected log: the creation entry already present, then the three appended entries
output = self.data.Attributes['Log'][0][0].strftime(self.data._timestampFormat) + "\t" + self.data.Attributes['Log'][0][1] + "\n"
for timestamp, message in [(time1, str1), (time2, str2), (time3, str3)]:
output = output + timestamp.strftime(self.data._timestampFormat) + "\t" + message + "\n"
self.assertEqual(self.data.log, output)
def test_exclude_samples(self):
exclusionList = numpy.random.randint(1, self.noSamp, size=numpy.random.randint(1, int(self.noSamp / 2) + 1))
exclusionList = set(exclusionList)
exclusionList = list(exclusionList)
self.data.initialiseMasks()
exclusionsStr = list(map(str, exclusionList))
exclusionsStr.append('Not a sample in the list')
missingSamples = self.data.excludeSamples(exclusionsStr, on='Sample File Name', message='Test Excluded')
exclusionList = [x - 1 for x in exclusionList]
expectedSampleMask = numpy.squeeze(numpy.ones([self.noSamp, 1], dtype=bool))
expectedSampleMask[numpy.ix_(exclusionList)] = False
numpy.testing.assert_array_equal(self.data.sampleMask, expectedSampleMask)
self.assertEqual(missingSamples, ['Not a sample in the list'])
def test_exclude_samples_raises(self):
exclusionList = numpy.random.randint(1, self.noSamp, size=numpy.random.randint(1, int(self.noSamp / 2) + 1))
exclusionList = set(exclusionList)
exclusionList = list(exclusionList)
self.data.initialiseMasks()
self.assertRaises(ValueError, self.data.excludeSamples, map(str, exclusionList), on='Not a real key', message='Test Excluded')
self.assertRaises(TypeError, self.data.excludeSamples, map(str, exclusionList), on='Sample File Name', message=list())
def test_exclude_features_discrete(self):
exclusionList = numpy.random.randint(1, self.noFeat, size=numpy.random.randint(1, int(self.noFeat / 2) + 1))
exclusionList = set(exclusionList)
exclusionList = list(exclusionList)
self.data.initialiseMasks()
exclusionsStr = list(map(str, exclusionList))
exclusionsStr.append('Not a feature in the list')
missingFeatures = self.data.excludeFeatures(exclusionsStr, on='Feature Name', message='Test Excluded')
exclusionList = [x - 1 for x in exclusionList]
expectedFeatureMask = numpy.squeeze(numpy.ones([self.noFeat, 1], dtype=bool))
expectedFeatureMask[numpy.ix_(exclusionList)] = False
numpy.testing.assert_array_equal(self.data.featureMask, expectedFeatureMask)
self.assertEqual(missingFeatures, ['Not a feature in the list'])
def test_exclude_features_spectral(self):
noSamp = numpy.random.randint(5, high=10, size=None)
noFeat = numpy.random.randint(500, high=1000, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='NMRDataset', variableType=VariableType.Spectral)
ranges = [(1,2), (8.5, 7)]
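# The second range is deliberately reversed, (8.5, 7), to check that the min()/max() handling below copes with flipped bounds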
mask = numpy.ones_like(dataset.featureMask, dtype=bool)
for spRange in ranges:
localMask = numpy.logical_or(dataset.featureMetadata['ppm'] < min(spRange),
dataset.featureMetadata['ppm'] > max(spRange))
mask = numpy.logical_and(mask, localMask)
dataset.excludeFeatures(ranges, on='ppm')
numpy.testing.assert_array_equal(mask, dataset.featureMask)
def test_exclude_features_raises(self):
exclusionList = numpy.random.randint(1, self.noFeat, size=numpy.random.randint(1, int(self.noFeat / 2) + 1))
exclusionList = set(exclusionList)
exclusionList = list(exclusionList)
self.data.initialiseMasks()
self.assertRaises(ValueError, self.data.excludeFeatures, map(str, exclusionList), on='Not a real key', message='Test Excluded')
self.assertRaises(TypeError, self.data.excludeFeatures, map(str, exclusionList), on='Feature Name', message=list())
self.data.VariableType = 'Not a real type'
self.assertRaises(ValueError, self.data.excludeFeatures, ['unimportant'], on='Feature Name', message='Test Excluded')
def test_exclude_features_warns(self):
self.data.VariableType = VariableType.Spectral
self.assertWarns(UserWarning, self.data.excludeFeatures, [(1, 1)], on='Feature Name', message='Test Excluded')
def test_get_features_discrete(self):
self.data.VariableType = nPYc.enumerations.VariableType.Discrete
self.data.initialiseMasks()
with self.subTest(msg='List of features'):
# Select a random set of features
featureList = numpy.random.randint(1, self.noFeat, size=numpy.random.randint(1, int(self.noFeat / 2) + 1))
featureNames = [*self.data.featureMetadata.loc[featureList, 'Feature Name']]
features, measurements = self.data.getFeatures(featureNames, by='Feature Name')
numpy.testing.assert_array_equal(self.data.intensityData[:, featureList], measurements)
pandas.testing.assert_frame_equal(self.data.featureMetadata.iloc[featureList], features)
with self.subTest(msg='Single feature'):
# Select a random set of features
featureList = numpy.random.randint(1, self.noFeat)
featureName = self.data.featureMetadata.loc[featureList, 'Feature Name']
features, measurements = self.data.getFeatures(featureName, by='Feature Name')
numpy.testing.assert_array_equal(self.data.intensityData[:, featureList], numpy.squeeze(measurements))
pandas.testing.assert_frame_equal(self.data.featureMetadata.iloc[[featureList]], features)
def test_get_features_spectral(self):
spectrumRange = (-5, 5)
data = nPYc.Dataset()
validChars = string.ascii_letters + string.digits  # characters for generating random strings
# Randomly sized intensity data
noFeat = numpy.random.randint(100,1000)
noSamp = numpy.random.randint(3,100)
data.intensityData = numpy.random.rand(noSamp,noFeat)
data.sampleMetadata = pandas.DataFrame(numpy.linspace(1,noSamp, num=noSamp, dtype=int), columns=['Sample File Name']).astype(str)
data.featureMetadata = pandas.DataFrame(numpy.linspace(spectrumRange[0],spectrumRange[1], num=noFeat, dtype=float), columns=['ppm'])
data.VariableType = nPYc.enumerations.VariableType.Spectral
data.Attributes['Feature Names'] = 'ppm'
data.initialiseMasks()
with self.subTest(msg='List of features'):
# Select a random set of features
# Between two and five ranges
noRanges = numpy.random.randint(2, 5)
ppmRange = list()
rangeMask = numpy.zeros_like(data.featureMask)
for i in range(0, noRanges):
startIndex = numpy.random.randint(0, noFeat/2)
endIndex = startIndex + numpy.random.randint(2, 10)
if endIndex > data.noFeatures:
endIndex = data.noFeatures
rangeMask[startIndex:endIndex+1] = True
ppmRange.append((data.featureMetadata.loc[startIndex, 'ppm'], data.featureMetadata.loc[endIndex, 'ppm']))
features, measurements = data.getFeatures(ppmRange, by='ppm')
pandas.testing.assert_frame_equal(data.featureMetadata.loc[rangeMask], features)
numpy.testing.assert_array_equal(data.intensityData[:, rangeMask], measurements)
with self.subTest(msg='Single feature'):
# Select a random set of features
startIndex = numpy.random.randint(0, noFeat/2)
endIndex = startIndex + numpy.random.randint(2, noFeat/10)
ppmRange = (data.featureMetadata.loc[startIndex, 'ppm'], data.featureMetadata.loc[endIndex, 'ppm'])
features, measurements = data.getFeatures(ppmRange, by='ppm')
numpy.testing.assert_array_equal(data.intensityData[:, startIndex:endIndex+1], measurements)
pandas.testing.assert_frame_equal(data.featureMetadata.iloc[startIndex:endIndex+1], features)
with self.subTest(msg='Flipped range feature'):
# Select a random set of features
startIndex = numpy.random.randint(0, noFeat/2)
endIndex = startIndex + numpy.random.randint(2, noFeat/10)
ppmRange = (data.featureMetadata.loc[endIndex, 'ppm'], data.featureMetadata.loc[startIndex, 'ppm'])
features, measurements = data.getFeatures(ppmRange, by='ppm')
numpy.testing.assert_array_equal(data.intensityData[:, startIndex:endIndex+1], measurements)
pandas.testing.assert_frame_equal(data.featureMetadata.iloc[startIndex:endIndex+1], features)
def test_get_features_autofeaturename(self):
self.data.initialiseMasks()
featureNames = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(numpy.random.randint(3,15)))
self.data.VariableType = nPYc.enumerations.VariableType.Discrete
self.data.Attributes['Feature Names'] = featureNames
self.data.featureMetadata.rename(columns={'Feature Name': featureNames}, inplace=True)
# Select a random set of features
featureList = numpy.random.randint(1, self.noFeat, size=numpy.random.randint(1, int(self.noFeat / 2) + 1))
featureNames = [*self.data.featureMetadata.loc[featureList, featureNames]]
features, measurements = self.data.getFeatures(featureNames)
numpy.testing.assert_array_equal(self.data.intensityData[:, featureList], measurements)
pandas.testing.assert_frame_equal(self.data.featureMetadata.iloc[featureList], features)
def test_get_features_raises(self):
self.data.VariableType = nPYc.enumerations.VariableType.Discrete
self.assertRaises(KeyError, self.data.getFeatures, 'featureName', by='Banana')
self.data.VariableType = 'Not an enum'
self.assertRaises(TypeError, self.data.getFeatures, 'featureName', by='Feature Name')
class test_dataset_loadsop(unittest.TestCase):
"""
Test the loading of SOP json
"""
def setUp(self):
# Load empty object and populate with synthetic data.
# Empty object
self.data = nPYc.Dataset()
def test_loadparameters(self):
with self.subTest(msg='Checking null return for \'Generic\' SOP.'):
attributes = {'Log': self.data.Attributes['Log'],
"methodName": "Unknown",
'dpi': 300,
'figureFormat': 'png',
'figureSize': [11, 7],
'histBins': 100,
'noFiles': 10,
'quantiles': [25, 75],
'sampleMetadataNotExported': ["Exclusion Details"],
'featureMetadataNotExported': [],
"analyticalMeasurements":{},
"excludeFromPlotting":[],
"sampleTypeColours": {"StudySample": "b", "StudyPool": "g", "ExternalReference": "r", "MethodReference": "m",
"ProceduralBlank": "c", "Other": "grey", "Study Sample": "b",
"Study Reference": "g", "Long-Term Reference": "r",
"Method Reference": "m", "Blank": "c",
"Unspecified SampleType or AssayRole": "grey"}
}
self.assertEqual(self.data.Attributes, attributes)
def test_overrideparameters(self):
data = nPYc.Dataset(figureFormat='svg', squids=True)
attributes = {'Log': data.Attributes['Log'],
'methodName': 'Unknown',
'dpi': 300,
'figureFormat': 'svg',
'figureSize': [11, 7],
'histBins': 100,
'noFiles': 10,
'quantiles': [25, 75],
'squids': True,
'sampleMetadataNotExported': ["Exclusion Details"],
'featureMetadataNotExported': [],
"analyticalMeasurements":{},
"excludeFromPlotting":[],
"sampleTypeColours": {"StudySample": "b", "StudyPool": "g", "ExternalReference": "r", "MethodReference": "m",
"ProceduralBlank": "c", "Other": "grey", "Study Sample": "b",
"Study Reference": "g", "Long-Term Reference": "r",
"Method Reference": "m", "Blank": "c",
"Unspecified SampleType or AssayRole": "grey"}
}
self.assertEqual(data.Attributes, attributes)
def test_loadParameters_invalidsoppath(self):
with tempfile.TemporaryDirectory() as tmpdirname:
fakeSOPpath = os.path.join(tmpdirname, 'foldernotthere')
self.assertRaises(ValueError, self.data._loadParameters, 'fakeSOP', fakeSOPpath)
def test_loadParameters_invalidsop(self):
with tempfile.TemporaryDirectory() as tmpdirname:
self.assertRaises(ValueError, self.data._loadParameters, 'fakeSOP', None)
def test_loadParameters_customsoppath(self):
testSOPcontents = {'testsopkey':'testsopvalue', 'testsopkey2': 2}
with tempfile.TemporaryDirectory() as tmpdirname:
# Create temp SOP file
with open(os.path.join(tmpdirname, 'testSOP.json'), 'w') as outfile:
json.dump(testSOPcontents, outfile)
self.data._loadParameters('testSOP', tmpdirname)
testSOPcontents = {**self.data.Attributes, **testSOPcontents}
self.assertEqual(self.data.Attributes, testSOPcontents)
class test_dataset_addsampleinfo(unittest.TestCase):
"""
Test the loading of study designs
"""
def setUp(self):
self.Data = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.Data.addSampleInfo(descriptionFormat='Filenames')
def test_dataset_load_npc_lims(self):
"""
Test we are matching sample IDs in the LIMS correctly
"""
samplingIDs = pandas.Series(['Study Pool Sample'] * 92
+ ['Procedural Blank Sample'] * 2
+ ['Study Pool Sample'] * 10
+ ['UT1_S1_s1', 'UT1_S2_s1', 'UT1_S3_s1', 'Not specified', 'UT1_S4_s2', 'UT1_S4_s3', 'UT1_S4_s4', 'UT1_S4_s5',
'External Reference Sample', 'Study Pool Sample', 'Not specified'],
name='Sample ID', dtype='str')
samplingIDs = samplingIDs.astype(str)
data = copy.deepcopy(self.Data)
data.addSampleInfo(descriptionFormat='NPC LIMS', filePath=os.path.join('..','..','npc-standard-project','Derived_Worklists','UnitTest1_MS_serum_PCSOP.069.csv'))
pandas.testing.assert_series_equal(data.sampleMetadata['Sample ID'], samplingIDs)
def test_dataset_load_npc_subjectinfo_columns(self):
columns = ['Person responsible', 'Sampling Protocol', 'Creatinine (mM)', 'Glucose (mM)', 'Class', 'Date of Birth', 'Gender', 'Further Subject info?', 'Environmental measures', 'SubjectInfoData']
data = copy.deepcopy(self.Data)
data.addSampleInfo(descriptionFormat='NPC LIMS', filePath=os.path.join('..','..','npc-standard-project','Derived_Worklists','UnitTest1_MS_serum_PCSOP.069.csv'))
data.addSampleInfo(descriptionFormat='NPC Subject Info', filePath=os.path.join('..','..','npc-standard-project','Project_Description','UnitTest1_metadata_PCDOC.014.xlsx'))
data.addSampleInfo(descriptionFormat='NPC Subject Info', filePath=os.path.join('..','..','npc-standard-project','Project_Description','UnitTest1_metadata_PCDOC.014.xlsx'))
for column in columns:
with self.subTest(msg='Checking ' + column):
self.assertIn(column, data.sampleMetadata.keys())
def test_dataset_load_csv(self):
from nPYc.enumerations import AssayRole, SampleType
data = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
data.addSampleInfo(descriptionFormat='Basic CSV', filePath=os.path.join('..', '..','npc-standard-project','Derived_Worklists', 'UnitTest1_metadata_basic_csv.csv'))
expectedSampleMask = numpy.array([True] * 114 + [False], dtype=bool)
expectedSampleMetadata = pandas.DataFrame(0, index=numpy.arange(115), columns=['Sample File Name', 'Sample Base Name', 'Batch', 'Correction Batch', 'Acquired Time', 'Run Order',
'Exclusion Details', 'Metadata Available', 'Sample ID', 'AssayRole', 'SampleType', 'Dilution'])
expectedSampleMetadata['Sample File Name'] = (['UnitTest1_LPOS_ToF02_B1SRD%02d' % i for i in range(1, 93)] +
['UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR', 'UnitTest1_LPOS_ToF02_B1E3_SR',
'UnitTest1_LPOS_ToF02_B1E4_SR', 'UnitTest1_LPOS_ToF02_B1E5_SR',
'UnitTest1_LPOS_ToF02_B1S1_SR', 'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02', 'UnitTest1_LPOS_ToF02_S1W03',
'UnitTest1_LPOS_ToF02_S1W04', 'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR', 'UnitTest1_LPOS_ToF02_ERROR'])
expectedSampleMetadata['Sample Base Name'] = expectedSampleMetadata['Sample File Name']
expectedSampleMetadata['Metadata Available'] = True
expectedSampleMetadata['Batch'] = numpy.nan
expectedSampleMetadata['Correction Batch'] = numpy.nan
expectedSampleMetadata['Acquired Time'] = numpy.nan
expectedSampleMetadata['Run Order'] = numpy.nan
expectedSampleMetadata['Exclusion Details'] = numpy.nan
expectedSampleMetadata['Sample ID'] = ([numpy.nan] * 104 +
['UT1_S1_s1', 'UT1_S2_s1', 'UT1_S3_s1', 'UT1_S4_s1', 'UT1_S4_s2', 'UT1_S4_s3', 'UT1_S4_s4', 'UT1_S4_s5',
'LTR', 'SR', numpy.nan])
expectedSampleMetadata['AssayRole'] = ([AssayRole.LinearityReference] * 92 + [AssayRole.Assay] * 2 +
[AssayRole.PrecisionReference] * 10 + [AssayRole.Assay] * 8 +
[AssayRole.PrecisionReference] * 2 + [numpy.nan])
expectedSampleMetadata['SampleType'] = ([SampleType.StudyPool] * 92 + [SampleType.ProceduralBlank] * 2 +
[SampleType.StudyPool] * 10 + [SampleType.StudySample] * 8 +
[SampleType.ExternalReference, SampleType.StudyPool, numpy.nan])
expectedSampleMetadata['Dilution'] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 20,
20, 20, 20, 20, 40, 40, 40, 60, 60, 60, 80, 80, 80, 80, 80,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20,
20, 40, 40, 40, 60, 60, 60, 80, 80, 80, 80, 80, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 0, 0, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
pandas.testing.assert_frame_equal(expectedSampleMetadata, data.sampleMetadata, check_dtype=False)
numpy.testing.assert_array_equal(expectedSampleMask, data.sampleMask)
def test_dataset_load_csv_raises(self):
with tempfile.TemporaryDirectory() as tmpdirname:
# Generate a CSV with no 'Sample File Name' column
testDF = pandas.DataFrame([[1, 2, 3], [1, 2, 3]], columns=['a', 'b', 'c'])
testDF.to_csv(os.path.join(tmpdirname, 'tmp.csv'))
data = nPYc.Dataset()
self.assertRaises(KeyError, data.addSampleInfo, descriptionFormat='Basic CSV', filePath=os.path.join(tmpdirname, 'tmp.csv'))
def test_dataset_load_isatab(self):
columns = ['Sample File Name', 'Sample Base Name', 'Sample Base Name Normalised',
'Sampling ID', 'Assay data name', 'Dilution', 'Run Order',
'Acquired Time', 'Instrument', 'Chromatography', 'Ionisation', 'Batch',
'Plate', 'Well', 'Correction Batch', 'Detector', 'Subject ID', 'Age',
'Gender', 'Status', 'Sample Name', 'Assay data name Normalised',
'Exclusion Details', 'Study Sample', 'Long-Term Reference',
'Study Reference', 'Method Reference', 'Dilution Series',
'LIMS Marked Missing', 'Data Present', 'LIMS Present', 'Skipped',
'AssayRole', 'SampleType', 'SubjectInfoData']
data = copy.deepcopy(self.Data)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
data.addSampleInfo(descriptionFormat='ISATAB',
filePath=os.path.join('..', '..', 'npc-standard-project', 'Project_Description', 'ISATAB-Unit-Test'),
studyID=1,
assay='MS',
assayID=1)
for column in columns:
with self.subTest(msg='Checking ' + column):
self.assertIn(column, data.sampleMetadata.columns)
def test_dataset_parsefilename(self):
data = nPYc.Dataset()
self.assertRaises(NotImplementedError, data.addSampleInfo, descriptionFormat='Filenames', filenameSpec='')
def test_dataset_raises(self):
data = nPYc.Dataset()
self.assertRaises(NotImplementedError, data.addSampleInfo, descriptionFormat='Not an understood format', filenameSpec='')
class test_dataset_addfeatureinfo(unittest.TestCase):
def test_dataset_add_reference_ranges(self):
"""
Assume the addReferenceRanges function is well tested - just check the expected columns appear
"""
data = nPYc.Dataset()
referencePath = os.path.join('..', 'nPYc', 'StudyDesigns', 'BI-LISA_reference_ranges.json')
data.featureMetadata = pandas.DataFrame(['TPTG', 'TPCH', 'TPFC', 'TPA1', 'TPA2', 'TPAB', 'VLTG', 'VLCH', 'VLFC', 'VLPL', 'VLAB'],
columns=['Feature Name'])
data.addFeatureInfo(descriptionFormat='Reference Ranges', filePath=referencePath)
columns = ['Unit', 'Upper Reference Bound', 'Upper Reference Value', 'Lower Reference Bound', 'Lower Reference Value']
for column in columns:
with self.subTest(msg='Checking ' + column):
self.assertIn(column, data.featureMetadata.columns)
def test_dataset_add_feature_info(self):
data = nPYc.Dataset()
csvFilePath = os.path.join('..', '..', 'npc-standard-project', 'Derived_Data',
'UnitTest1_PCSOP.069_featureMetadata.csv')
data.featureMetadata = pandas.DataFrame(
['3.17_262.0378m/z', '3.17_293.1812m/z', '3.17_145.0686m/z', '3.17_258.1033m/z'],
columns=['Feature Name'])
data.addFeatureInfo(descriptionFormat=None, filePath=csvFilePath, featureId='Feature Name')
expectedFeatureMetadata = pandas.DataFrame({'Feature Name': ['3.17_262.0378m/z', '3.17_293.1812m/z', '3.17_145.0686m/z', '3.17_258.1033m/z'],
'Retention Time': [3.17, 3.17, 3.17, 3.17],
'm/z': [262.0378, 293.1812, 145.0686, 258.1033]})
pandas.testing.assert_frame_equal(expectedFeatureMetadata, data.featureMetadata, check_dtype=False)
class test_dataset_initialiseFromCSV(unittest.TestCase):
def test_initialiseFromCSV(self):
noSamp = numpy.random.randint(5, high=10, size=None)
import sys
sys.path.insert(1, '..')
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from findiff.operators import FinDiff, Identity, Coef
from findiff.pde import *
#import matplotlib.pyplot as plt
#from mpl_toolkits import mplot3d
class TestPDE(unittest.TestCase):
def test_1d_dirichlet_hom(self):
shape = (11,)
x = np.linspace(0, 1, 11)
dx = x[1] - x[0]
L = FinDiff(0, dx, 2)
bc = BoundaryConditions(shape)
bc[0] = 1
bc[-1] = 2
pde = PDE(L, np.zeros_like(x), bc)
u = pde.solve()
expected = x + 1
np.testing.assert_array_almost_equal(expected, u)
def test_1d_dirichlet_inhom(self):
nx = 21
shape = (nx,)
x = np.linspace(0, 1, nx)
dx = x[1] - x[0]
L = FinDiff(0, dx, 2)
bc = BoundaryConditions(shape)
bc[0] = 1
bc[-1] = 2
pde = PDE(L, 6*x, bc)
u = pde.solve()
expected = x**3 + 1
np.testing.assert_array_almost_equal(expected, u)
def test_1d_neumann_hom(self):
nx = 11
shape = (nx,)
x = np.linspace(0, 1, nx)
dx = x[1] - x[0]
L = FinDiff(0, dx, 2)
bc = BoundaryConditions(shape)
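# Assigning a plain value sets a Dirichlet condition; assigning a (FinDiff, value)
# tuple, as on the right boundary below, sets a Neumann (derivative) condition.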
bc[0] = 1
bc[-1] = FinDiff(0, dx, 1), 2
pde = PDE(L, np.zeros_like(x), bc)
u = pde.solve()
expected = 2*x + 1
np.testing.assert_array_almost_equal(expected, u)
def test_2d_dirichlet_hom(self):
shape = (11, 11)
x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1])
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x, y, indexing='ij')
L = FinDiff(0, dx, 2) + FinDiff(1, dy, 2)
expected = X + 1
bc = BoundaryConditions(shape)
bc[0, :] = 1
bc[-1, :] = 2
bc[:, 0] = X + 1
bc[:, -1] = X + 1
pde = PDE(L, np.zeros_like(X), bc)
u = pde.solve()
np.testing.assert_array_almost_equal(expected, u)
def test_2d_dirichlet_inhom(self):
shape = (11, 11)
x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1])
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x, y, indexing='ij')
L = FinDiff(0, dx, 2) + FinDiff(1, dy, 2)
expected = X**3 + Y**3 + 1
f = 6*X + 6*Y
bc = BoundaryConditions(shape)
bc[0, :] = expected
bc[-1, :] = expected
bc[:, 0] = expected
bc[:, -1] = expected
pde = PDE(L, f, bc)
u = pde.solve()
np.testing.assert_array_almost_equal(expected, u)
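# Standard unittest entry point, added for convenience (not part of the original
# excerpt), so the test module can be run directly:
if __name__ == '__main__': unittest.main()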
#%pylab inline
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
#self.encoded_path = "./encoded_train_50.out"
#self.data_path = "./pp_fs-peptide.npy"
class Plot(object):
def __init__(self, encoded_path=None, data_path=None):
"""
encoded_path : string
- path of the encoded .out file. Should be located in ./output_data
data_path : string
- path of the data .npy. Should be located in ./output_data
"""
if encoded_path is None or data_path is None:
raise ValueError("Must input encoded_path and data_path as parameters.")
if (not os.path.exists(encoded_path)):
raise Exception("Path " + str(encoded_path) + " does not exist!")
if (not os.path.exists(data_path)):
raise Exception("Path " + str(data_path) + " does not exist!")
self.encoded_path = encoded_path
self.data_path = data_path
def encode_images(self):
print("Encode image for train data")
# encode images
# project inputs on the latent space
self.x_pred_encoded = np.loadtxt(self.encoded_path)
#x_pred_encoded = x_pred_encoded[10000:110000]
data_input = np.load(self.data_path)
#data_input = data_input[10000:110000]
label = data_input.sum(axis=1)
label = np.reshape(label, (len(label), 1))
sep_train = 0.8
sep_test = 0.9
sep_pred = 1
sep_1 = int(data_input.shape[0]*sep_train)
sep_2 = int(data_input.shape[0]*sep_test)
sep_3 = int(data_input.shape[0]*sep_pred)
y_train_0 = label[:sep_1,0]
self.y_train_2 = label[:sep_1,0]
y_test_0 = label[sep_1:sep_2,0]
y_test_2 = label[sep_1:sep_2,0]
y_pred_0 = label[sep_2:sep_3,0]
y_pred_2 = label[sep_2:sep_3,0]
def plot(self):
# plot 1:
Dmax = self.y_train_2
[n,s] = np.histogram(Dmax, 11)
d = np.digitize(Dmax, s)
#[n,s] = np.histogram(-np.log10(Dmax), 11)
#d = np.digitize(-np.log10(Dmax), s)
cmi = plt.get_cmap('jet')
cNorm = mpl.colors.Normalize(vmin=min(Dmax), vmax=max(Dmax))
#cNorm = mpl.colors.Normalize(vmin=140, vmax=240)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmi)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# scatter3D requires a 1D array for x, y, and z
# ravel() converts the 100x100 array into a 1x10000 array
p = ax.scatter3D(np.ravel(self.x_pred_encoded[:, 0]),
np.ravel(self.x_pred_encoded[:, 1]),
                         np.ravel(self.x_pred_encoded[:, 2]),
                         c=scalarMap.to_rgba(Dmax))  # color each point by its Dmax value
import os
from file_lengths import FileLengths
import pandas as pd
import numpy as np
import json
#path = os.path.abspath('../file_lengths.json')
fl = FileLengths()
df = np.array(fl.file_lengths)
#file_lengths = json.loads(path)
df = np.delete(df, 1, axis=1)
df = np.squeeze(df)
df = df.astype(float)  # np.float was removed in NumPy 1.24; use the builtin float
#35 seconds as a cutoff
hist, bin_edges = np.histogram(df, bins=20, range=(0, 40))
# -- coding: utf-8 --
# Copyright 2018 <NAME> <<EMAIL>>
"""
Library to handle SPM data.
This is the core module of all images retrieved by SPM and ToF-SIMS.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import scipy.optimize
import skimage
import skimage.exposure
import skimage.filters
import scipy.interpolate
from skimage import transform as tf
import copy
from .utils import CDF, funit
import sys
import matplotlib as mpl
import warnings
from .utils.misc import PB
try:
from skimage.filters import threshold_local
except ImportError:
# For compatibility with old versions of skimage
from skimage.filters import threshold_adaptive as threshold_local
class SPM_image:
"""
Main class to handle SPM images.
This class contains the pixels data of the images as well as it's real size.
It also provides a lot of tools to correct and perform various analysis and tasks on the image.
"""
def __init__(self, BIN, channel='Topography',
corr=None, real=None, zscale='?', _type='Unknown'):
"""
Create a new SPM_image
Parameters
----------
BIN : 2D numpy array
The pixel values of the image as a 2D numpy array
channel : string
The name of the channel. What does the image represents?
corr : string or None
'slope' : correct the SPM image for its slope (see pySPM.SPM.SPM_image.correct_slope)
'lines' : correct the SPM image for its lines (see pySPM.SPM.SPM_image.correct_lines)
'plane' : correct the SPM image by plane fitting (see pySPM.SPM.SPM_image.correct_plane)
real : None or dictionary
Information about the real size of the image {'x':width,'y':height,'unit':unit_name}
zscale : string
Unit used to describe the z-scale. (units of the data of BIN)
_type : string
represent the type of measurement
"""
self.channel = channel
self.direction = 'Unknown'
self.size = {'pixels': {'x': BIN.shape[1], 'y': BIN.shape[0]}}
if not real is None:
self.size['real'] = real
else:
self.size['real'] = {'unit': 'pixels',
'x': BIN.shape[1], 'y': BIN.shape[0]}
if not 'unit' in self.size['real']:
self.size['real']['unit'] = 'px'
self.pixels = BIN
self.type = _type
self.zscale = zscale
if corr is not None:
if corr.lower() == 'slope':
self.correct_slope()
elif corr.lower() == 'lines':
self.correct_lines()
elif corr.lower() == 'plane':
self.correct_plane()
def __add__(self, b):
"""
Add up two images. This is a low level function and no check is performed to proof that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels += b.pixels
New.channel += " + "+b.channel
elif type(b) in [int, float]:
New.pixels += b
            New.channel += " + {:.2f}".format(b)
return New
def __sub__(self, b):
"""
Subtract two images. This is a low level function and no check is performed to proof that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels -= b.pixels
New.channel += " - "+b.channel
elif type(b) in [int, float]:
New.pixels -= b
            New.channel += " - {:.2f}".format(b)
return New
def __mul__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels *= b.pixels
New.channel = "({})*{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels *= b
New.channels = "({})*{:.2f}".format(New.channel,b)
return New
def __div__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels /= b.pixels
New.channel = "({})/{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels /= b
New.channels = "({})/{:.2f}".format(New.channel,b)
return New
def pxs(self):
"""
Return the pixel size
"""
fxy = {xy: funit(self.size['real'][xy], self.size['real']['unit']) for xy in 'xy'}
return [(fxy[xy]['value']/self.size['pixels'][xy], fxy[xy]['unit']) for xy in 'xy']
def add_scale(self, length, ax=None, height=20, margin=5, color='w', loc=4, text=True, pixels=None, fontsize=20, edge_color='k', edge_width=3):
"""
Display a scale marker on an existing image
Parameters
----------
length : float
The length of the scale in real units
ax : matplotlib axis
if None the current axis will be taken (plt.gca())
height : int
The height of the scale bar in pixels
color : string
The color used to display the scale bar
loc : int
The location of the scale bar.
1 : top right
2 : top left
3 : bottom left
4 : bottom right
text : bool
display the size of the scale on top of it?
pixels : bool
Is the image plotted in ax with a x/y scale in pixels?
fontsize : float
The fontsize used to display the text
Example
-------
>>> img = pySPM.SPM_image()
>>> img.show()
>>> img.add_scale(50e-6, pixels=False);
Add a scale of 50 μm on an image displayed with real units
>>> img = pySPM.SPM_image()
>>> img.show(pixels=True)
>>> img.add_scale(50e-6);
Add a scale of 50 μm on an image displayed in pixels
"""
import matplotlib.patches
import matplotlib.patheffects as PathEffects
fL = length/self.size['real']['x']
L = self.size['pixels']['x']*fL
fH = height/self.size['pixels']['y']
if ax is None:
ax = plt.gca()
if pixels is None:
if hasattr(ax, 'isPixel'):
pixels = ax.isPixel
else:
pixels = False
flipped = False
if hasattr(ax, 'flipped'):
flipped = ax.flipped
if type(loc) is int:
assert loc in [1, 2, 3, 4]
ref = ax.transAxes.transform({1:(1-fL,0),2:(0,0),3:(0,1-fH),4:(1-fL,1-fH)}[loc])
if loc in [2,3]:
ref[0] += margin
else:
ref[0] -= margin
if loc in [1,2]:
ref[1] += margin
else:
ref[1] -= margin
else:
assert type(loc) in [tuple, list]
assert len(loc)==2
ref = ax.transData.transform(loc) + ax.transAxes.transform((-fL/2,-fH/2)) - ax.transAxes.transform((0,0))
inv = ax.transData.inverted()
ref = inv.transform(ref)
WH = inv.transform(ax.transAxes.transform((fL,fH)))-inv.transform(ax.transAxes.transform((0,0)))
rect = ax.add_patch(matplotlib.patches.Rectangle(ref, width=WH[0], height=WH[1], color=color))
if text:
r = funit(length, self.size['real']['unit'])
if r['unit'][0] == 'u':
r['unit'] = '$\\mu$' + r['unit'][1:]
if loc in [3,4]:
label_ref = [ref[0]+WH[0]/2, ref[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="top", ha="center")
else:
label_ref = [ref[0]+WH[0]/2, ref[1]+WH[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="bottom", ha="center")
ann.set_path_effects([PathEffects.withStroke(linewidth=edge_width, foreground=edge_color)])
def offset(self, profiles, width=1, ax=None, col='w', inline=True, **kargs):
"""
Correct an image by offsetting each row individually in order that the lines passed as argument in "profiles" becomes flat.
Parameters
----------
profiles: list of list
each sublist represent a line as [x1, y1, x2, y2] in pixels known to be flat
width : int, float
the line width in pixels used for better statistics
ax : matplotlib axis or None
If not None, axis in which the profiles will be plotted in
inline : bool
If True perform the correction on the current object, otherwise return a new image
col : string
matrplotlib color used to plot the profiles (if ax is not None)
labels : bool
display a label number with each profile
**kargs: arguments passed further to get_row_profile.
axPixels: set to True if you axis "ax" have the data plotted in pixel instead of real distance
Example
-------
        Example if the data are plotted in pixels:
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False,axPixels=True)
>>> topo.show(pixels=True, ax=ax[0])
>>> topoC.show(ax=ax[1]);
Example if the data are plotted with real units
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False)
>>> topo.show(ax=ax[0])
>>> topoC.show(ax=ax[1]);
"""
offset = np.zeros(self.pixels.shape[0])
counts = np.zeros(self.pixels.shape[0])
for i, p in enumerate(profiles):
if kargs.get('labels', False):
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, label=str(i), **kargs)
else:
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, **kargs)
counts[y] += 1
offset[y[1:]] += np.diff(D)
counts[counts == 0] = 1
offset = offset/counts
offset = np.cumsum(offset)
offset = offset.reshape((self.pixels.shape[0], 1))
if inline:
self.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return self
else:
C = copy.deepcopy(self)
C.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return C
def pxRect2Real(self, xy, width, height):
"""
        Transform xy, width, height data given in pixels to the equivalent values in real units
"""
ll = self.px2real(xy[0],xy[1])
ur = self.px2real(xy[0]+width,xy[1]+height)
return ll,ur[0]-ll[0],ur[1]-ll[1]
def get_row_profile(self, x1, y1, x2, y2, width=1, col='C1', ax=None, alpha=0, **kargs):
"""
Get a profile per row along a given line. This function is mainly useful for the function offset.
x1, y1, x2, y2: int
coordinates of the line.
width : int
the width of the line used for statistics (in pixels)
col: string
color used to plot the line position
ax : matplotlib axis
axis in which the lines position will plotted
alpha : float
The alpha channel of the line color (≥0 and ≤1)
**kargs:
line style arguments: linewidth, color and linestyle
axis units: axPixels set to True if ax has the image plotted in pixels.
Returns
-------
Y coordinates : 1D numpy array
distance along the profile starting at 0
Z coordinates : 1D numpy array
profile
"""
plotargs = { key: kargs[key] for key in ['linewidth', 'color', 'linestyle'] if key in kargs }
if y2 < y1:
x1, y1, x2, y2 = x2, y2, x1, y1
if ax is not None:
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
dx = -width/2*(y2-y1)/d
dy = width/2*(x2-x1)/d
if kargs.get('axPixels', False):
ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], col)
ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], col)
ax.plot((x1, x2), (y1, y2), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2),.5*(y1+y2)), color=col)
if alpha>0:
import matplotlib.patches
                ax.add_patch(matplotlib.patches.Rectangle((x1+dx, y1+dy), width, d, angle=-np.degrees(np.arctan2(x2-x1, y2-y1)), color=col, alpha=alpha))
else:
h = self.pixels.shape[0]
pxs = self.size['real']['x'] / self.pixels.shape[1]
pys = self.size['real']['y'] / h
ax.plot([(x1-dx)*pxs, (x1+dx)*pxs], [(h-(y1-dy))*pys, (h-(y1+dy))*pys], col)
ax.plot([(x2-dx)*pxs, (x2+dx)*pxs], [(h-(y2-dy))*pys, (h-(y2+dy))*pys], col)
ax.plot((x1*pxs, x2*pxs), ((h-y1)*pys, (h-y2)*pys), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2)*pxs,.5*(2*h-y1-y2)*pys), color=col)
if alpha>0:
import matplotlib.patches
W = np.sqrt((2*dx*pxs)**2+(2*dy*pys)**2)
L = np.sqrt(((x2-x1)*pxs)**2+((y2-y1)*pys)**2)
                ax.add_patch(matplotlib.patches.Rectangle(((x1+dx)*pxs, (y1+dy)*pys), W, L, angle=-np.degrees(np.arctan2((x2-x1)*pxs, (y2-y1)*pys)), color=col, alpha=alpha))
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
I = scipy.interpolate.interp2d(x, y, np.flipud(self.pixels))
Y = np.arange(y1, y2+1)
V = np.zeros(len(Y))
for w in np.arange(width):
xl = np.linspace(x1-(width-1)/2.+w, x2-(width-1)/2.+w, len(Y))
for i in range(len(Y)):
Z = I(xl[i], Y[i])
V[i] += Z
return Y, V/width
def correct_median_diff(self, inline=True):
"""
Correct the image with the median difference
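        Example
        -------
        Illustrative sketch (assumes an existing image, as in the other examples):
        >>> topo = pySPM.SPM_image(...)
        >>> flat = topo.correct_median_diff(inline=False)  # keep the original, return a corrected copy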
"""
N = self.pixels
# Difference of the pixel between two consecutive row
N2 = np.vstack([N[1:, :], N[-1:, :]])-N
# Take the median of the difference and cumsum them
C = np.cumsum(np.median(N2, axis=1))
# Extend the vector to a matrix (row copy)
D = np.tile(C, (N.shape[0], 1)).T
if inline:
self.pixels = N-D
else:
New = copy.deepcopy(self)
New.pixels = N-D
return New
def correct_slope(self, inline=True):
"""
Correct the image by subtracting a fitted slope along the y-axis
"""
s = np.mean(self.pixels, axis=1)
i = np.arange(len(s))
fit = np.polyfit(i, s, 1)
if inline:
self.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return New
def correct_plane(self, inline=True, mask=None):
"""
Correct the image by subtracting a fitted 2D-plane on the data
Parameters
----------
inline : bool
If True the data of the current image will be updated otherwise a new image is created
mask : None or 2D numpy array
If not None define on which pixels the data should be taken.
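        Example
        -------
        Illustrative sketch (assumes an existing image, as in the other examples):
        >>> topo = pySPM.SPM_image(...)
        >>> flat = topo.correct_plane(inline=False)  # subtract the fitted plane, return a copy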
"""
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
X0, Y0 = np.meshgrid(x, y)
Z0 = self.pixels
if mask is not None:
X = X0[mask]
Y = Y0[mask]
Z = Z0[mask]
else:
X = X0
Y = Y0
Z = Z0
A = np.column_stack((np.ones(Z.ravel().size), X.ravel(), Y.ravel()))
c, resid, rank, sigma = np.linalg.lstsq(A, Z.ravel(), rcond=-1)
if inline:
self.pixels -= c[0] * \
np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return self
else:
New = copy.deepcopy(self)
New.pixels -= c[0]*np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return New
def correct_lines(self, inline=True):
"""
Subtract the average of each line for the image.
if inline is True the current data are updated otherwise a new image with the corrected data is returned
"""
if inline:
self.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return New
def dist_v2(self, pixel=False):
"""
Return a 2D array with the distance between each pixel and the closest border.
        Might be useful for FFT filtering.
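        Example
        -------
        Illustrative sketch (assumes an existing image, as in the other examples):
        >>> topo = pySPM.SPM_image(...)
        >>> K = topo.dist_v2(pixel=True)  # distance of each pixel to the closest border, in pixels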
"""
if pixel:
dx = 1
dy = 1
else:
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
x2 = np.arange(self.size['pixels']['x'])
x2 = (np.minimum(x2, self.size['pixels']['x']-x2) * dx)**2
y2 = np.arange(self.size['pixels']['y'])
y2 = (np.minimum(y2, self.size['pixels']['y'] - y2) * dy)**2
X, Y = np.meshgrid(x2, y2)
return np.sqrt(X+Y)
def inv_calc_flat(self, d, l=0.1):
"""
Function used for inverse MFM calculation (inspired from http://qmfm.empa.ch/qmfm/)
        The function is in an early development stage as it is not used by the developers.
Parameters
----------
d : float
Height distance in the input data
l : float
Tikhonov parameter for the deconvolution
"""
work_image = self.pixels
ny, nx = self.pixels.shape
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
k = self.dist_v2()
k[0, 0] = 1e-10
tf = np.exp(-d*k)
tf[0, 0] = np.mean(tf)
tf /= 2
tf *= 1-np.exp(-d * k)
recon_tf = np.ones(tf.shape) / (tf+l*np.ones(tf.shape) / np.conj(tf))
tf *= recon_tf
return np.real(np.fft.ifft2(np.fft.fft2(work_image)*recon_tf))
def get_extent(self):
"""
Get the image extent in real data
"""
if 'recorded' in self.size:
W = self.size['recorded']['real']['x']
H = self.size['recorded']['real']['y']
else:
W = self.size['real']['x']
H = self.size['real']['y']
return (0, W, 0, H)
def show(self, ax=None, sig=None, cmap=None, title=None,
adaptive=False, dmin=0, dmax=0, pixels=False, flip=False, wrap=None, mul=1, symmetric=False, **kargs):
"""
Function to display the image with a lot of parametrization
Parameters
----------
ax : matplotlib axis or None
matplotlib axis if given otherwise current axis will be used (plt.gca())
sig : float
sigma values to adjust the contrast range around the mean ±sig times the standard-deviation
cmap : string
colormap name used. By default a gray map is used. If the zscale of the data are in 'meter' (i.e. topography data) the 'hot' colormap is used
title : string
The title of the plot. By default is the channel name
adaptive : bool
            The color scale used is linear. If adaptive is True, a non-linear color scale is used so that each color covers approximately the same number of pixels.
dmin : float
minimum value adjustment used for the colorscale
dmax: float
maximum value adjustment used for the colorscale
pixels : bool
Display the image with x/y-labels with real unit. If pixels is True, the axes are in pixels
flip : bool
Flip the image upside-down
        wrap : None or int
            wrap the title to a width of wrap characters
symmetric : bool
If True will place the middle of the colorscale to the value 0.
            This is specially useful for diverging colormaps such as: BrBG, bwr, coolwarm, seismic, Spectral, etc.
level : float
level should be ≥0 and <50. Adjust the lower and upper colorscale to level% and (100-level)% of the data range.
e.g. if level=1, the colorscale will display 1-99% of the data range
vmin : float
Minimum value used for the colorscale
        vmax : float
Maximum value used for the colorscale
Returns
-------
matplotlib.image.AxesImage
            matplotlib image instance returned by imshow
Examples
--------
>>> topo = pySPM.SPM_image(...)
>>> fig, (ax, ax2) = plt.subplots(2, 3, figsize=(15, 10))
>>> topo.show(ax=ax[0], cmap='gray', title="color map=\"gray\"")
>>> topo.show(ax=ax[1], sig=2, title="standard deviation=2")
>>> topo.show(ax=ax[2], adaptive=True, title="Adaptive colormap")
>>> topo.show(ax=ax2[0], dmin=4e-8, cmap='gray', title="raise the lowest value for the colormap of +40nm")
>>> topo.show(ax=ax2[1], dmin=3e-8, dmax=-3e-8, cmap='gray',title="raise lower of +30nm and highest of -30nm")
>>> topo.show(ax=ax2[2], pixels=True, title="Set axis value in pixels");
"""
mpl.rc('axes', grid=False)
if ax is None:
ax = plt.gca()
ax.src = self
if title == None:
title = u"{0} - {1}".format(self.type, self.channel)
if wrap is not None:
title = "\n".join([title[i*wrap:(i+1)*wrap]
for i in range(int(len(title)/wrap)+1)])
unit = self.size['real']['unit']
sunit = 'afpnum kMGTPE'
if len(unit) == 1 or unit in ['pixels']:
isunit = 6
elif unit[0] in sunit:
isunit = sunit.find(unit[0])
unit = unit[1:]
else:
isunit = 6
W = self.size['real']['x']
H = self.size['real']['y']
fact = int(np.floor(np.log(W)/np.log(10)/3))
isunit += fact
W, H = W/10**(fact*3), H/10**(fact*3)
if cmap == None:
cmap = 'gray'
if unit == 'm' and self.channel == "Topography":
cmap = 'hot'
mi, ma = np.nanmin(self.pixels), np.nanmax(self.pixels)
if adaptive:
img = np.asarray(256**2*(self.pixels-mi)/(ma-mi), dtype=np.uint16)
mi, ma = 0, 1
img = skimage.exposure.equalize_adapthist(img, clip_limit=0.03)
else:
img = mul*self.pixels
mi *= mul
ma *= mul
if sig == None:
vmin = mi+dmin
vmax = ma+dmax
else:
std = np.nanstd(img)
avg = np.nanmean(img)
vmin = avg - sig * std
vmax = avg + sig * std
if 'level' in kargs:
if kargs['level'] < 0 or kargs['level']>=50:
raise ValueError("The level shoud have a value in [0,50)")
vmax = np.percentile(img, 100-kargs['level'])
vmin = np.percentile(img, kargs['level'])
del kargs['level']
if 'vmin' in kargs:
vmin = kargs['vmin']
del kargs['vmin']
if 'vmax' in kargs:
vmax = kargs['vmax']
del kargs['vmax']
if symmetric:
vmax = abs(max(vmin,vmax))
vmin = -vmax
if not flip:
ax.flipped = False
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), extent=[0, W, 0, H], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.flipped = True
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), cmap=cmap, extent=[0, W, 0, H], vmin=vmin, vmax=vmax, **kargs)
if pixels:
ax.set_xlim((0, self.pixels.shape[1]))
if flip:
ax.set_ylim((0, self.pixels.shape[0]))
else:
ax.set_ylim((self.pixels.shape[0], 0))
else:
ax.set_xlim((0,W))
if flip:
ax.set_ylim((H,0))
else:
ax.set_ylim((0,H))
if not pixels:
if isunit != 6:
u = sunit[isunit]
if u == 'u':
u = '$\\mu$'
ax.set_xlabel(u'x [{0}{1}]'.format(u, unit))
ax.set_ylabel(u'y [{0}{1}]'.format(u, unit))
else:
ax.set_xlabel(u'x [{0}]'.format(unit))
ax.set_ylabel(u'y [{0}]'.format(unit))
if title != None:
ax.set_title(title)
return r
def real2px(self, x, y):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
return self.real2pixels(x,y)
def real2pixels(self, x, y, float=False):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not float:
px = np.digitize(x, np.linspace(0,self.size['real']['x']/(10**fact),self.pixels.shape[1]), right=True)
py = np.digitize(y, np.linspace(0,self.size['real']['y']/(10**fact),self.pixels.shape[0]), right=False)
else:
px = x*(self.pixels.shape[1]-1)/(self.size['real']['x']/(10**fact))
py = y*(self.pixels.shape[0]-1)/(self.size['real']['y']/(10**fact))
return px, py
def px2real(self, x, y):
"""
Transform a (x,y) value from pixels to real
Units are the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
rx = x*self.size['real']['x']/(10**fact)/self.pixels.shape[1]
ry = (self.pixels.shape[0]-y)*self.size['real']['y']/(10**fact)/self.pixels.shape[0]
return rx, ry
def circular_profile(self, x0, y0, Ra=1, Rn=0, width=1, N=20, A=0, B=360,\
cmap='jet', axImg=None, axPolar=None, axProfile=None, plotProfileEvery=1,\
xtransf=lambda x: x*1e9, ytransf=lambda x:x*1e9,\
ToFcorr=False, fit=lambda x, *p: p[3]+p[2]*CDF(x, *p[:2]), p0=None, errors=False, bounds=(-np.inf, np.inf), fakefit=False, **kargs):
"""
Create radial profiles from point x0,y0 with length Ra (outer radius) and Rn (negative Radius).
Start from angle A° to angle B° with N profiles.
If you want to apply the ToF-correction, please set ToFcorr to the number of scans used to record the ToF-SIMS image.
Return the fitting uncertainty on sigma if errors is set to True
        The fitting function can be adjusted via fit and the initial parameters via p0, which is a list of values or functions; each function is called with the x-values and y-values of the profile and should return the corresponding initial parameter.
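        Example
        -------
        Illustrative sketch (assumes an image with a roughly circular feature centered at pixel (120, 130)):
        >>> img = pySPM.SPM_image(...)
        >>> angles, res, cov = img.circular_profile(120, 130, Ra=40, Rn=10, N=36, width=3)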
"""
from matplotlib import colors, cm
# Create a colormap for each profile
CM = plt.get_cmap(cmap)
cNorm = colors.Normalize(vmin=0, vmax=N)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=CM)
res = []
cov = []
angles = []
assert A<B
for i, angle in enumerate(np.linspace(A, B, N)):
a = np.radians(angle)
angles.append(a)
l, p = self.get_profile(
x0-Rn*np.cos(a),
y0+Rn*np.sin(a),
x0+Ra*np.cos(a),
y0-Ra*np.sin(a),
ax=axImg, width=width, color=scalarMap.to_rgba(i), **kargs)
if width==0:
profile = p
else:
profile = np.mean(p, axis=1)
if ToFcorr:
profile = -np.log(1.001-profile/ToFcorr)
if p0 is None:
AC = np.mean(profile[:len(l)//2])
AE = np.mean(profile[len(l)//2:])
if AC<AE:
p0 = [l[len(l)//2], 5*(l[1]-l[0]), np.max(profile)-np.min(profile), np.min(profile) ]
else:
p0 = [l[len(l)//2], 5*(l[1]-l[0]), -np.max(profile)+np.min(profile), np.max(profile) ]
else:
for j,p in enumerate(p0):
if callable(p):
p0[j] = p(l,profile)
if kargs.get('debug',False):
print("calculate fit parameters are", p0)
if not fakefit:
p0, pcov = scipy.optimize.curve_fit(fit, l , profile, p0)
else:
pcov = np.zeros((len(p0),len(p0)))
res.append(p0)
cov.append([np.sqrt(abs(pcov[i,i])) for i in range(len(p0))])
if axProfile and i%plotProfileEvery == 0:
axProfile.plot(xtransf(l-p0[0]), profile, color=scalarMap.to_rgba(i), linestyle=':')
axProfile.plot(xtransf(l-p0[0]), fit(l,*p0), color=scalarMap.to_rgba(i))
# close loop
if A%360 == B%360:
angles.append(angles[0])
res.append(res[0])
cov.append(cov[0])
# Plot polar
angles = np.array(angles)
res = np.array(res)
cov = np.array(cov)
fact = 2*np.sqrt(2*np.log(2))
if axPolar:
axPolar.plot(angles, ytransf(res[:,1]), color=kargs.get('sig_color','C0'), label="$\\sigma$")
axPolar.plot(angles, ytransf(fact*res[:,1]), color=kargs.get('fwhm_color','C1'), label="FWHM")
if errors:
axPolar.fill_between(angles, ytransf(res[:,1]-cov[:,1]),ytransf(res[:,1]+cov[:,1]), color=kargs.get('sig_color','C0'), alpha=kargs.get('fillalpha',.5))
                axPolar.fill_between(angles, fact*ytransf(res[:, 1]-cov[:, 1]), fact*ytransf(res[:, 1]+cov[:, 1]), color=kargs.get('fwhm_color', 'C1'), alpha=kargs.get('fillalpha', .5))
return angles, res, cov
def get_profile(self, x1, y1, x2, y2, width=0, ax=None, pixels=True, color='w', axPixels=None, **kargs):
"""
retrieve the profile of the image between pixel x1,y1 and x2,y2
Parameters
----------
x1, y1, x2, y2 : ints
coordinates for the profile
ax : matplotlib axis
defines the matplotlib axis on which the position of the profile should be drawn (in not None)
width : int
the width of the profile (for averaging/statistics) in pixels
color : string
color used to plot the profiles lines
axPixels : bool
If True the image plotted in the ax axis is displayed in pixels
Returns
-------
x data : 1D numpy array
profile : 1D numpy array
"""
if kargs.get('debug',False):
print("get_profile input coordinates:", x1, y1, x2, y2)
if ax is not None and axPixels is None:
if hasattr(ax, 'isPixel'):
axPixels = ax.isPixel
if axPixels is None:
axPixels = pixels
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not pixels:
if kargs.get('debug', False):
print("Image range (real scale):", self.size['real']['x']/(10**fact), self.size['real']['y']/(10**fact))
x1, y1 = self.real2pixels(x1, y1, float=True)
x2, y2 = self.real2pixels(x2, y2, float=True)
y1 = self.pixels.shape[0]-y1
y2 = self.pixels.shape[0]-y2
if kargs.get('debug', False):
print("Pixel coordinates:", x1, y1, x2, y2)
if not axPixels:
xvalues, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color,\
transx = lambda x: x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],\
transy = lambda x: (self.pixels.shape[0]-x)*(self.size['real']['y']/(10**fact))/self.pixels.shape[0],\
**kargs)
else:
values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color, **kargs)
else:
if axPixels:
values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color, **kargs)
else:
values, p = get_profile(np.flipud(self.pixels), x1, y1, x2, y2, ax=ax, width=width, color=color,\
transx = lambda x: x*(self.size['real']['x']/(10**fact))/self.pixels.shape[1],\
transy = lambda x: (self.pixels.shape[0]-x)*(self.size['real']['y']/(10**fact))/self.pixels.shape[0],\
**kargs)
dx = (x2-x1)*self.size['real']['x']/self.size['pixels']['x']
dy = (y2-y1)*self.size['real']['y']/self.size['pixels']['y']
rd = np.sqrt(dx**2+dy**2)
xvalues = np.linspace(0, rd, len(p))
return xvalues, p
def plot_profile(self, x1, y1, x2, y2, width=0, ax=None, pixels=True, img=None, imgColor='w', ztransf=lambda x: x, zunit=None, **kargs):
"""
Retrieve and plot a profile from an image
Parameters
----------
x1, y1, x2, y2 : int
coordinate of the profile in real size or in pixels (if pixels is True)
width : float
the width of the profiles in pixels for better statistics
ax : matplotlib axis
The axis in which the profile will be plotted
pixels : bool
If True the coordinates are given in pixels and not in real units
img : matplotlib axis
The axis in which the profile position will be drawn
imgColor : string
The color used to display the profile positions
ztransf : function
function to transform the profile data. This can be used to scale the data.
Most profiles are retrieved in 'm' and a 'nm' value can be used by using ztransf=lambda x: x*1e9
zunit : string
the zunit name used if ztransft is used
color : string
The color of the profile
col : string
can be used instead of color
stdplot : bool
If True display the ±nσ plots where n is given by the sig parameter
sig : int
The number of sigmas used in stdplot
label : string
The label used for plotting the profile (useful if you perform a ax.legend() afterwards)
Returns
-------
dictionary : {'plot': matplotlib_plot_instance, 'l': profile_xaxis, 'z': profile_yaxis}
Examples
--------
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topo.plot_profile(70, 100, 170, 200, ax=ax[1], img=ax[0], ztransf=lambda x:x*1e9, zunit='nm');
>>> topo.show(ax=ax[0], pixels=True);
"""
col = kargs.get('color',kargs.get('col','C0'))
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if ax == None:
ax = plt.gca()
xvalues, p = self.get_profile(x1, y1, x2, y2, width=width, color=imgColor, ax=img, pixels=pixels, **kargs)
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
dx = (x2-x1)
dy = (y2-y1)
if pixels:
rd = d
u = ''
unit = 'px'
else:
unit = self.size['real']['unit']
sunit = 'afpnum kMGTPE'
if len(unit) == 1:
isunit = 6
elif unit[0] in sunit:
isunit = sunit.find(unit[0])
unit = unit[1:]
else:
isunit = 6
isunit += fact//3
if isunit != 6:
u = sunit[isunit]
else:
u=''
if u == 'u':
u = '$\\mu$'
rd = np.sqrt(dx**2+dy**2)
xvalues = np.linspace(0, rd, len(p))
lab = kargs.get("label", "")
if width < 2:
profile = ztransf(p)
else:
profile = ztransf(np.mean(p, axis=1))
s = np.std(p)
if kargs.get('stdplot', False):
for ns in range(1, kargs.get('sig', 2)+1):
ax.fill_between(xvalues, profile-ns*s, profile+ns*s, color=col, alpha=.2, label=[lab+' ($\\sigma,\ldots {}\\sigma$)'.format(kargs.get('sig',2)),None][ns>1])
Plot = ax.plot(xvalues, profile, color=col, linewidth=kargs.get('linewidth',1),linestyle=kargs.get('linestyle','-'), label=lab+[' (mean)',''][width<2])
if kargs.get('min',False):
minStyle = kargs.get('minStyle', kargs.get('minmaxStyle', '--'))
minColor = kargs.get('minColor', kargs.get('minmaxColor', col))
minMarker = kargs.get('minMarker', kargs.get('minmaxMarker', ''))
ax.plot(xvalues, np.min(p, axis=1), color=minColor, linewidth=kargs.get('linewidth',1),linestyle=minStyle, marker=minMarker, label=lab+' (min)')
if kargs.get('max', False):
maxStyle = kargs.get('maxStyle',kargs.get('minmaxStyle','--'))
maxColor = kargs.get('maxColor',kargs.get('minmaxColor',col))
maxMarker = kargs.get('maxMarker',kargs.get('minmaxMarker',''))
ax.plot(xvalues, np.max(p, axis=1), color=maxColor, linestyle=maxStyle, linewidth=kargs.get('linewidth',1), marker=maxMarker, label=lab+' (max)')
ax.set_xlabel("Distance [{1}{0}]".format(unit, u))
if zunit is not None:
ax.set_ylabel("{1} [{0}]".format(zunit, self.channel))
else:
ax.set_ylabel("{1} [{0}]".format(self.zscale, self.channel))
return {'plot': Plot, 'l': xvalues, 'z': profile}
def get_bin_threshold(self, percent, high=True, adaptive=False, binary=True, img=False):
"""
Threshold the image into binary values
Parameters
----------
percent : float
The percentage where the thresholding is made
high : bool
If high a value of 1 is returned for values > percent
adaptive : bool
            If True, performs an adaptive thresholding (see skimage.filters.threshold_local)
binary : bool
If True return bool data (True/False) otherwise numeric (0/1)
img : bool
If True return a SPM_image otherwise a numpy array
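        Example
        -------
        Illustrative sketch (assumes an existing image, as in the other examples):
        >>> topo = pySPM.SPM_image(...)
        >>> high = topo.get_bin_threshold(.5)            # boolean mask of the upper half of the value range
        >>> imgT = topo.get_bin_threshold(.5, img=True)  # the same mask wrapped in a SPM_image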
"""
if adaptive:
if binary:
return self.pixels > threshold_local(self.pixels, percent)
return threshold_local(self.pixels, percent)
mi = np.min(self.pixels)
norm = (self.pixels-mi)/(np.max(self.pixels)-mi)
if high:
r = norm > percent
else:
r = norm < percent
if not img:
if binary:
return r
return np.ones(self.pixels.shape)*r
else:
I = copy.deepcopy(self)
I.channel = "Threshold from "+I.channel
if binary:
I.pixels = r
else:
I.pixels = np.ones(self.pixels.shape)*r
return I
def spline_offset(self, X, Y, Z=None, inline=True, ax=None, output='img', **kargs):
"""
        Subtract a spline interpolated from the given point coordinates.
        If Z is None, the image values at those points will be used (default).
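        Example
        -------
        Illustrative sketch (X and Y are pixel coordinates of points known to lie on the flat background):
        >>> topo = pySPM.SPM_image(...)
        >>> topo.spline_offset([10, 120, 240], [15, 128, 235])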
"""
if ax is not None:
if 'num' in kargs and kargs['num']:
text_color = 'k'
if 'text_color' in kargs:
text_color = kargs['text_color']
del kargs['text_color']
for i in range(len(X)):
l = self.pixels.shape[1]-X[i] < 20
ax.annotate(str(i), (X[i], Y[i]), ([
5, -5][l], 0), textcoords='offset pixels', va="center", ha=["left", "right"][l], color=text_color)
del kargs['num']
ax.plot(X, Y, 'o', **kargs)
import scipy.interpolate
T = np.flipud(self.pixels) - np.min(self.pixels)
if Z is None:
Z = [T[Y[i], X[i]] for i in range(len(X))]
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
xx, yy = np.meshgrid(x, y)
I = scipy.interpolate.SmoothBivariateSpline(X, Y, Z)
z = I.ev(xx, yy)
if inline:
self.pixels -= z
return z
else:
if output == 'img':
New = copy.deepcopy(self)
New.pixels -= z
return New
elif output == 'spline':
return z
else:
raise ValueError(
"The output parameter should be either 'img' or 'spline'")
def get_shadow_mask(self, angle, BIN=None, prog=False):
"""
If an image is recorded with a beam incident with a certain angle, the topography will shadow the data.
This function generates the shadow mask for a given topography and a given incident angle.
Parameters
----------
angle : float
The incidence angle in degrees
BIN : numpy array
Data. If given will move the recorded pixels at the correct x,y positions
prog : bool
display a progressbar ?
Note
----
        This function is old and might not be optimized or work properly.
"""
if BIN is not None:
BIN = BIN*1.0
slope = np.tan(np.radians(angle))
neg = False
if slope < 0:
neg = True
slope = -slope
topo = np.fliplr(self.pixels)
if BIN is not None:
BIN = np.fliplr(BIN)
else:
topo = self.pixels
x = np.linspace(0, self.size['real']['x'], self.pixels.shape[1])
if self.size['real']['unit'] == 'um':
x *= 1e-6
elif self.size['real']['unit'] == 'nm':
x *= 1e-9
mask = np.zeros(self.pixels.shape)
AFM_bin_shadow = np.zeros(self.pixels.shape)
Y = range(self.pixels.shape[0])
if prog:
Y = PB(Y)
for yi in Y:
for xi in range(self.pixels.shape[1]):
cut = self.pixels.shape[1]-2
y_ray = slope*(x-x[xi]) + topo[yi, xi]
while cut > xi and y_ray[cut] > topo[yi, cut]:
cut -= 1
if xi == cut:
if BIN is not None:
AFM_bin_shadow[yi, xi] = BIN[yi, xi]
continue
# Cut has been found
if BIN is not None:
x1 = x[cut]
x2 = x[cut+1]
y1 = topo[yi, cut]
y2 = topo[yi, cut+1]
x0 = x[xi]
y0 = topo[yi, xi]
if y2 == y1:
x_cut = (y1+slope*x0-y0)/slope
y_cut = y1
else:
numerator = x1/(x2-x1)+(y0-slope*x0-y1)/(y2-y1)
denominator = 1/(x2-x1)-slope/(y2-y1)
x_cut = numerator / denominator
y_cut = slope*(x_cut-x0)+y0
if x_cut >= x1 and x_cut <= x2:
y1 = BIN[yi, cut]
y2 = BIN[yi, cut+1]
yint = (((y2-y1)/(x2-x1))*(x_cut-x1))+y1
else:
yint = BIN[yi, xi]
AFM_bin_shadow[yi, xi] = yint
mask[yi, xi] = 1
if neg:
mask = np.fliplr(mask)
AFM_bin_shadow = np.fliplr(AFM_bin_shadow)
if BIN is not None:
return (mask, AFM_bin_shadow)
return mask
def adjust_position(self, fixed):
"""
Shift the current pixels to match a fixed image.
        The shift is determined by the position where the cross-correlation is maximized.
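        Example
        -------
        Illustrative sketch (assumes two images of identical shape):
        >>> imgA, imgB = pySPM.SPM_image(...), pySPM.SPM_image(...)
        >>> aligned = imgB.adjust_position(imgA)  # roll imgB so it overlaps imgA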
"""
adj = copy.deepcopy(self)
cor = np.fft.fft2(fixed.pixels)
cor = np.abs(np.fft.ifft2(np.conj(cor) * np.fft.fft2(self.pixels)))
cor = cor / fixed.pixels.size
ypeak, xpeak = np.unravel_index(cor.argmax(), cor.shape)
shift = [-(ypeak-1), -(xpeak-1)]
adj.pixels = np.roll(self.pixels, shift[0], axis=0)
adj.pixels = np.roll(adj.pixels, shift[1], axis=1)
return adj
def align(self, tform, cut=True):
"""
Apply an Affine transform on the data
Parameters
----------
tform : skimage.transform
the affine transform to perform
cut : bool
If True cut the data
"""
New = copy.deepcopy(self)
New.pixels = tf.warp(self.pixels, tform, preserve_range=True)
if not cut:
return New
cut = [0, 0] + list(self.pixels.shape)
if tform.translation[0] >= 0:
cut[2] -= tform.translation[0]
elif tform.translation[0] < 0:
cut[0] -= tform.translation[0]
if tform.translation[1] >= 0:
cut[1] += tform.translation[1]
elif tform.translation[1] < 0:
cut[3] += tform.translation[1]
cut = [int(x) for x in cut]
New.cut(cut, inplace=True)
return New, cut
def get_fft(self):
"""
        Return the shifted 2D FFT of the image
"""
return np.fft.fftshift(np.fft.fft2(self.pixels))
def corr_fit2d(self, nx=2, ny=1, poly=False, inline=True, mask=None):
"""
Subtract a fitted 2D-polynom of nx and ny order from the data
Parameters
----------
nx : int
the polynom order for the x-axis
ny : int
the polynom order for the y-axis
poly : bool
if True the polynom is returned as output
inline : bool
create a new object?
mask : 2D numpy array
mask where the fitting should be performed
"""
r, z = fit2d(self.pixels, nx, ny, mask=mask)
if inline:
self.pixels -= z
else:
N = copy.deepcopy(self)
N.pixels -= z
if poly:
return N, z
return N
if poly:
return z
return self
def zero_min(self, inline=True):
"""
Shift the values so that the minimum becomes zero.
"""
if inline:
self.pixels -= np.min(self.pixels)
return self
else:
N = copy.deepcopy(self)
N.pixels -= np.min(N.pixels)
return N
def filter_lowpass(self, p, inline=True):
"""
Execute a lowpass filter on the data
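        Example
        -------
        Illustrative sketch (assumes an existing image, as in the other examples):
        >>> topo = pySPM.SPM_image(...)
        >>> smooth = topo.filter_lowpass(.1, inline=False)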
"""
F = self.get_fft()
mask = self.getRmask() < p
if inline:
self.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
else:
C = copy.deepcopy(self)
C.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
return C
def _resize_infos(self):
"""
Internal to recalculate the real size when the image is cropped or cut
"""
self.size['real']['x'] *= self.pixels.shape[1]/self.size['pixels']['x']
self.size['real']['y'] *= self.pixels.shape[0]/self.size['pixels']['y']
self.size['pixels']['x'] = int(self.pixels.shape[1])
self.size['pixels']['y'] = int(self.pixels.shape[0])
if 'recorded' in self.size:
self.size['recorded']['real']['x'] \
*= (self.pixels.shape[1]/self.size['pixels']['x'])
self.size['recorded']['real']['y'] \
*= (self.pixels.shape[0]/self.size['pixels']['y'])
self.size['recorded']['pixels']['x'] = int(self.pixels.shape[1])
self.size['recorded']['pixels']['y'] = int(self.pixels.shape[0])
def filter_scars_removal(self, thresh=.5, inline=True):
"""
Filter function to remove scars from images.
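        Example
        -------
        Illustrative sketch (assumes an existing image, as in the other examples):
        >>> topo = pySPM.SPM_image(...)
        >>> clean = topo.filter_scars_removal(thresh=.5, inline=False)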
"""
if not inline:
C = copy.deepcopy(self)
else:
C = self
for y in range(1, self.pixels.shape[0]-1):
b = self.pixels[y-1, :]
c = self.pixels[y, :]
a = self.pixels[y+1, :]
mask = np.abs(b-a) < thresh*(np.abs(c-a))
C.pixels[y, mask] = b[mask]
if not inline:
return C
return self
def cut(self, c, inline=False, pixels=True, **kargs):
"""
Clip/Crop the image
Parameters
----------
c : list [llx,lly,urx,ury]
            list of the lower-left (ll) and upper-right (ur) coordinates
inline: bool
perform the transformation inline or produce a new SPM_image?
pixels : bool
Are the coordinates given in pixels?
Returns
-------
        self if inline (or inplace), the clipped SPM_image otherwise
"""
if 'inplace' in kargs:
inline=kargs['inplace']
if kargs.get('debug',False):
print("cut) Input coordinates:", c)
if not pixels:
c = [z for s in zip(*self.real2pixels(c[0::2], c[1::2])) for z in s]
if kargs.get('debug',False):
print("cut) pixel coordinates:", c)
if not inline:
new = copy.deepcopy(self)
new.pixels = cut(self.pixels, c, **kargs)
new._resize_infos()
return new
else:
self.pixels = cut(self.pixels, c, **kargs)
self._resize_infos()
return self
def zoom(self, zoom_factor, inplace=False, order=3):
"""
Resize the image to a new pixel size (but keep the real size) by pixel interpolation.
Parameters
----------
zoom_factor : float
> 1: up sampling
< 1: down sampling
order : int
The spline interpolation order to use. (default: 3). Use 0 for binary or very sharp images.
inplace : bool
create a new image?
"""
        from scipy.ndimage import zoom
if not inplace:
new = copy.deepcopy(self)
new.pixels = zoom(new.pixels, zoom_factor, order=order)
new.size['pixels']['x'] = new.pixels.shape[1]
new.size['pixels']['y'] = new.pixels.shape[0]
return new
else:
self.pixels = zoom(self.pixels, zoom_factor, order=order)
self.size['pixels']['x'] = self.pixels.shape[1]
self.size['pixels']['y'] = self.pixels.shape[0]
return self
# Note: The following functions are not part of the SPM_image class.
# All following functions are performed on numpy arrays
def cut(img, c, **kargs):
"""
Clip / Crop a numpy array
Parameters
----------
img : 2D numpy array
The input image array
c : list [llx, lly, urx, ury]
the lower-left (ll) and upper-right (ur) coordinates used for the cropping
"""
from .utils.geometry import Bbox
if kargs.get('debug',False):
print("cut in x", c[0], "->", c[2], " - in y", c[1], "->", c[3])
if isinstance(c, Bbox):
c = [c.left, c.bottom, c.right, c.top]
if c[3] < c[1]:
c = [c[0],c[3],c[2],c[1]]
if c[2] < c[0]:
c = [c[2],c[1],c[0],c[3]]
if c[2]-c[0] == img.shape[1] and c[3]-c[1] == img.shape[0]:
raise Exception("Reshaping the same array again?")
return img[c[1]:c[3], c[0]:c[2]]
def normalize(data, sig=None, vmin=None, vmax=None):
"""
Normalize the input data. Minimum_value -> 0 and maximum_value -> 1
Parameters
----------
data : numpy array
input data
sig : float or None
if not None:
mean(data)-sig*standard_deviation(data) -> 0
mean(data)+sig*standard_deviation(data) -> 1
vmin : float or None
if not None, define the lower bound i.e. vmin -> 0
vmax : float or None
        if not None, defines the upper bound i.e. vmax -> 1
Note
----
All values below the lower bound will be = 0
and all values above the upper bound will be = 1
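    Example
    -------
    Illustrative sketch (data is any numpy array):
    >>> N = normalize(data, sig=2)  # map mean ± 2*std to [0, 1] and clip the rest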
"""
if sig is None:
mi = np.min(data)
ma = np.max(data)
else:
s = sig*np.std(data)
mi = np.mean(data)-s
ma = np.mean(data)+s
if vmin is not None:
mi = vmin
if vmax is not None:
ma = vmax
N = (data-mi)/(ma-mi)
N[N < 0] = 0
N[N > 1] = 1
return N
def imshow_sig(img, sig=1, ax=None, **kargs):
"""
Shortcut to plot a numpy array around it's mean with bounds ±sig sigmas
Parameters
----------
img : 2D numpy array
input image to display
sig : float
The number of standard-deviation to plot
ax : matplotlib axis
matplotlib axis to use. If None, the current axis (plt.gca() will be used).
**kargs : additional parameters
        will be passed to the imshow function of matplotlib
"""
if ax == None:
fig, ax = plt.subplots(1, 1)
std = np.std(img)
avg = np.mean(img)
vmin = avg - sig * std
vmax = avg + sig * std
ax.imshow(img, vmin=vmin, vmax=vmax, **kargs)
def adjust_position(fixed, to_adjust, shift=False):
""" Shift the current pixels to match a fixed image by rolling the data"""
adj = copy.deepcopy(to_adjust)
cor = np.fft.fft2(fixed)
cor = np.abs(np.fft.ifft2(np.conj(cor) * np.fft.fft2(to_adjust)))
cor = cor / to_adjust.size
ypeak, xpeak = np.unravel_index(cor.argmax(), cor.shape)
    shift_xy = [-(ypeak-1), -(xpeak-1)]  # do not shadow the boolean `shift` argument
    adj = np.roll(to_adjust, shift_xy[0], axis=0)
    adj = np.roll(adj, shift_xy[1], axis=1)
    if shift:
        return adj, shift_xy
return adj
def tukeyfy(A, alpha, type='default'):
"""
Apply a Tukey window on the current image
Parameters
----------
A : 2D numpy array
input array
alpha : float
        Size of the Tukey window as a fraction of the image (≥0 and ≤1)
type : string
if not "default" perform a mean centering (data will blend down to its mean instead of 0)
"""
tuky = tukeywin(A.shape[0], alpha)
tukx = tukeywin(A.shape[1], alpha)
tuk = np.multiply(tukx[:, None].T, tuky[:, None])
    if type == 'default':
return A * tuk
avg = np.mean(A)
return avg+(A-avg) * tuk
def tukeywin(window_length, alpha=0.5):
    '''The Tukey window, also known as the tapered cosine window, can be regarded as a cosine lobe of width
    alpha * N / 2 that is convolved with a rectangular window of width (1 - alpha / 2). At alpha = 0 it becomes
    rectangular, and at alpha = 1 it becomes a Hann window.
    We use the same convention as MATLAB to provide the same results in case users compare a MATLAB output
    to this function's output.
Reference
---------
http://www.mathworks.com/access/helpdesk/help/toolbox/signal/tukeywin.html
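    Example
    -------
    Illustrative sketch:
    >>> w = tukeywin(128, alpha=0.25)  # flat center, cosine-tapered edges
    >>> plt.plot(w)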
'''
# Special cases
if alpha <= 0:
return np.ones(window_length) # rectangular window
elif alpha >= 1:
return np.hanning(window_length)
# Normal case
x = np.linspace(0, 1, window_length)
w = np.ones(x.shape)
# first condition 0 <= x < alpha/2
first_condition = x < alpha/2
w[first_condition] = 0.5 * \
(1 + np.cos(2*np.pi/alpha * (x[first_condition] - alpha/2)))
# second condition already taken care of
# third condition 1 - alpha / 2 <= x <= 1
third_condition = x >= (1 - alpha/2)
w[third_condition] = 0.5 * \
(1 + np.cos(2*np.pi/alpha * (x[third_condition] - 1 + alpha/2)))
return w
def overlay(ax, mask, color, **kargs):
"""
Plot an overlay on an existing axis
Parameters
----------
ax : matplotlib axis
input axis
mask : 2D numpy array
Binary array where a mask should be plotted
color : string
The color of the mask to plot
**kargs: additional parameters
passed to the imshow function of matploltib
"""
    import numpy.ma as ma
    from matplotlib import colors
    m = ma.masked_array(mask, ~mask)
col = np.array(colors.colorConverter.to_rgba(color))
I = col[:, None, None].T*m[:, :, None]
ax.imshow(I, **kargs)
def normP(x, p, trunk=True):
"""
    Normalize the input data according to its percentile values.
Parameters
----------
x : 2D numpy array
input data
p : float
percentile to normalize the data.
lower bound = p percentile
upper bound = (100-p) percentile
trunk : bool
If True the data are truncated between 0 and 1
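    Example
    -------
    Illustrative sketch (x is any numpy array):
    >>> N = normP(x, 1)  # stretch the 1st..99th percentile range to [0, 1]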
"""
thresh_high = np.percentile(x, 100-p)
thresh_low = np.percentile(x, p)
if thresh_low == thresh_high:
thresh_high = np.max(x)
thresh_low = np.min(x)
if thresh_low == thresh_high:
thresh_high = thresh_low+1
r = (x-thresh_low)/(thresh_high-thresh_low)
if trunk:
r[r < 0] = 0
r[r > 1] = 1
return r
def beam_profile(target, source, mu=1e-6, tukey=0, meanCorr=False, source_tukey=None, real=np.abs, **kargs):
"""
Calculate the PSF by deconvolution of the target
with the source using a Tikhonov regularization of factor mu.
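    Example
    -------
    Illustrative sketch (target and source are equally-shaped 2D numpy arrays):
    >>> psf = beam_profile(target, source, mu=1e-6, tukey=0.1)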
"""
if source_tukey is None:
source_tukey = tukey
if kargs.get('source_centering', False):
source = 2*source-1
if meanCorr:
target = target-np.mean(target)
if tukey>0:
target = tukeyfy(target, tukey)
if source_tukey>0:
source = tukeyfy(source, tukey)
tf = np.fft.fft2(source)
tf /= np.size(tf)
recon_tf = np.conj(tf) / (np.abs(tf)**2 + mu)
return np.fft.fftshift(real(np.fft.ifft2(np.fft.fft2(target) * recon_tf)))/np.size(target)
def beam_profile1d(target, source, mu=1e-6, real=np.abs):
tf = np.fft.fft(source)
tf /= np.size(tf)
recon_tf = np.conj(tf) / (np.abs(tf)**2 + mu)
F = np.fft.fft(target) * recon_tf
return np.fft.fftshift(real(np.fft.ifft(F))), F
def zoom_center(img, sx, sy=None):
"""
Zoom by taking the sx × sy central pixels
Parameters
----------
img : 2D numpy array
The input data
sx : int
The number of pixels along the x-axis to take
sy : int or None
        The number of pixels along the y-axis to take.
If None take the same value as for sx
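    Example
    -------
    Illustrative sketch (img is a 2D numpy array):
    >>> center = zoom_center(img, 128)  # central 128x128 pixels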
"""
if sy is None:
sy = sx
assert type(sx) is int
assert type(sy) is int
return img[
img.shape[0]//2-sy//2: img.shape[0]//2 + sy//2,
img.shape[1]//2-sx//2: img.shape[1]//2 + sx//2]
def px2real(x, y, size, ext):
rx = ext[0]+(x/size[1])*(ext[1]-ext[0])
ry = ext[2]+(y/size[0])*(ext[3]-ext[2])
return rx, ry
def real2px(x, y, size, ext):
px = size[1]*(x-ext[0])/(ext[1]-ext[0])
py = size[0]*(y-ext[2])/(ext[3]-ext[2])
return px, py
def fit2d(Z0, dx=2, dy=1, mask=None):
"""
Fit the input data with a 2D polynom of order dx × dy
Parameters
----------
Z0 : 2D numpy array
input data
dx : int
order of the polynom for the x-axis
dy : int
order of the polynom for the y-xis
mask : 2D numpy array
Give a mask where True values only will be used to perform the fitting
Returns
-------
numpy array
fitting parameters
2D numpy array
result of the polynom
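    Example
    -------
    Illustrative sketch (Z0 is a 2D numpy array):
    >>> r, Z2 = fit2d(Z0, 2, 1)  # 2nd order in x, 1st order in y
    >>> residual = Z0 - Z2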
"""
    x = np.arange(Z0.shape[1], dtype=float)
    y = np.arange(Z0.shape[0], dtype=float)
X0, Y0 = np.meshgrid(x, y)
if mask is not None:
X = X0[mask]
Y = Y0[mask]
Z = Z0[mask]
else:
X = X0
Y = Y0
Z = Z0
x2 = X.ravel()
y2 = Y.ravel()
A = np.vstack([x2**i for i in range(dx+1)])
A = np.vstack([A]+[y2**i for i in range(1, dy+1)])
res = scipy.optimize.lsq_linear(A.T, Z.ravel())
r = res['x']
Z2 = r[0]*np.ones(Z0.shape)
for i in range(1, dx+1):
Z2 += r[i]*(X0**i)
for i in range(1, dy+1):
Z2 += r[dx+i]*(Y0**i)
return r, Z2
def warp_and_cut(img, tform, cut=True):
"""
Perform an Affine transform on the input data and cut them if cut=True
Parameters
----------
img : 2D numpy array
input data
tform : skimage.transform
An Affine fransform to perform on the data
cut : bool
Should the data be cutted?
"""
New = tf.warp(img, tform, preserve_range=True)
Cut = [0, 0] + list(img.shape)
if tform.translation[0] >= 0:
Cut[2] -= tform.translation[0]
elif tform.translation[0] < 0:
Cut[0] -= tform.translation[0]
if tform.translation[1] >= 0:
Cut[1] += tform.translation[1]
elif tform.translation[1] < 0:
Cut[3] += tform.translation[1]
Cut = [int(x) for x in Cut]
if cut:
New = cut(New, Cut)
return New, Cut
def get_profile(I, x1, y1, x2, y2, width=0, ax=None, color='w', alpha=0, N=None,\
transx=lambda x: x, transy=lambda x: x, interp_order=1, **kargs):
"""
Get a profile from an input matrix.
    Low-level function. Doc will come later.
"""
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
if N is None:
N = int(d)+1
P = []
dx = -width/2*(y2-y1)/d
dy = width/2*(x2-x1)/d
for w in np.linspace(-width/2, width/2, max(1,width)):
dx = -w*(y2-y1)/d
dy = w*(x2-x1)/d
x = np.linspace(x1+dx, x2+dx, N)
y = np.linspace(y1+dy, y2+dy, N)
        M = scipy.ndimage.map_coordinates(I, np.vstack((y, x)), order=interp_order)
P.append(M)
if kargs.get('debug',False):
print("get_profile input coordinates:",x1,y1,x2,y2)
if not ax is None:
x1 = transx(x1)
x2 = transx(x2)
y1 = transy(y1)
y2 = transy(y2)
if kargs.get('debug',False):
print("Drawing coordinates:",x1,y1,x2,y2)
dx = -width/2*(y2-y1)/d
dy = width/2*(x2-x1)/d
if type(color) in [tuple, list]:
ax.plot([x1, x2], [y1, y2], color=color, alpha=kargs.get('linealpha',1))
ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], color=color, alpha=kargs.get('linealpha',1))
ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], color=color, alpha=kargs.get('linealpha',1))
else:
ax.plot([x1, x2], [y1, y2], color, alpha=kargs.get('linealpha',1), lw=kargs.get('lw',1))
ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], color, alpha=kargs.get('linealpha',1))
ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], color, alpha=kargs.get('linealpha',1))
if alpha>0:
import matplotlib.patches
            ax.add_patch(matplotlib.patches.Rectangle(
                (x1+dx, y1+dy),
                2*np.sqrt(dx**2+dy**2),
                np.sqrt((x2-x1)**2+(y2-y1)**2),
                angle=-np.degrees(np.arctan2(x2-x1, y2-y1)), color=color, alpha=alpha))
if len(P)==1:
return np.linspace(0, d, N), P[0]
return np.linspace(0, d, N), np.vstack(P).T
def dist_v2(img, dx=1, dy=1):
"""
    Return a 2D array with the distance in pixels of each pixel to the closest corner of the array.
"""
x2 = np.arange(img.shape[1])
x2 = (np.minimum(x2, img.shape[1]-x2) * dx)**2
y2 = np.arange(img.shape[0])
y2 = (np.minimum(y2, img.shape[0] - y2) * dy)**2
X, Y = np.meshgrid(x2, y2)
return np.sqrt(X+Y)
def generate_k_matrices(A, dx, dy):
"""
    GENERATE_K_MATRICES: k-matrix generation (helper function).
    Generates k-matrices for the 2D channel.
    K is a matrix of the same size as the pixel matrix A, containing the real-life frequency distance of each
    pixel position to the nearest corner of a matrix that is one pixel wider/higher.
    KX is of the same size as K and contains the real-life difference in x-direction of each pixel position
    to the nearest corner of a matrix that is one pixel wider/higher.
    Similarly, KY contains the real-life difference in y-direction.
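    Example
    -------
    Illustrative sketch (A is a 2D numpy array, dx and dy the real pixel sizes):
    >>> k, kx, ky = generate_k_matrices(A, 1e-9, 1e-9)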
"""
ny, nx = A.shape
dkx = 2*np.pi/(nx*dx)
dky = 2*np.pi/(ny*dy)
    ky = np.arange(0, ny)
    ky = (np.mod(ky+ny/2, ny) - ny/2) * dky
    kx = np.arange(0, nx)
    kx = (np.mod(kx+nx/2, nx) - nx/2) * dkx
kx, ky = np.meshgrid(kx, ky)
k = dist_v2(A, dkx, dky)
k[0, 0] = 1.0 # Prevent division by zero error and illegal operand errors. This may be improved...
return k, kx, ky
def mfm_tf(nx, dx, ny, dy, tf_in, derivative=0, transform=0, z=0, A=0, theta=None, phi=None, d=None, delta_w=None):
"""
Draft for the MFM tf function
"""
k, kx, ky = generate_k_matrices(tf_in, dx, dy)
# Distance loss
tf_out = np.exp(-z*k)
if d is not None:
tf_out = tf_out / 2.0
if not np.isinf(d):
if d == 0:
tf_out *= k
else:
tf_out *= 1 - np.exp(-d*k)
if A == 0:
if transform != 0:
assert theta is not None
assert phi is not None
tf_out *= ((np.cos(theta)+1j*(np.cos(phi)*np.sin(-theta)*kx+np.sin(phi)*np.sin(-theta)*ky)) / k)**transform
if derivative == 1:
tf_out *= k
else:
pass # TODO
return tf_out * tf_in
def mfm_inv_calc_flat(img, z, tf_in, thickness=None, delta_w=None, amplitude=0, derivative=0, transform=0, mu=1e-8):
"""
MFM inv calc function
"""
theta = np.radians(12)
phi = np.radians(-90)
ny, nx = img.shape
tf = mfm_tf(nx, 1, ny, 1, tf_in, derivative, transform, z, amplitude, theta, phi, thickness, delta_w)
tf[0,0] = np.real(np.mean(tf))
    recon_tf = np.conj(tf) / (mu + np.abs(tf)**2)  # Tikhonov-regularized inverse filter, same form as in beam_profile
    return np.real(np.fft.ifft2(np.fft.fft2(img) * recon_tf))