import numpy as np
def statistics(data_dict):
stat_dict = {}
for i in data_dict:
data = data_dict[i]
data = data[data != -1]
if len(data)>0:
mean_ = np.mean(data)
median_ = np.median(data)
min_ = np.amin(data)
            max_ = np.amax(data)
            # assumed completion of the truncated source: collect the four
            # statistics per key and return the dict
            stat_dict[i] = (mean_, median_, min_, max_)
    return stat_dict
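# --- Usage sketch (hypothetical data, not part of the original source) ---
# Entries equal to -1 are treated as missing and filtered out; a key whose
# samples are all missing is skipped entirely.
demo = {'a': np.array([1.0, -1.0, 2.0, 3.0]), 'b': np.array([-1.0, -1.0])}
print(statistics(demo))  # {'a': (2.0, 2.0, 1.0, 3.0)}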
import numpy as np
class LinearRegression:
def fit(self, X, y):
X = np.c_[np.ones((X.shape[0], 1)), X]
        self.beta = np.linalg.inv(X.T @ X) @ X.T @ y
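# --- Usage sketch (synthetic data, not part of the original source) ---
# fit() solves the normal equations by inverting X^T X directly; for
# ill-conditioned X, np.linalg.lstsq or np.linalg.pinv is more robust.
rng = np.random.default_rng(0)
X_demo = rng.normal(size=(100, 2))
y_demo = 1.0 + 2.0 * X_demo[:, 0] - 3.0 * X_demo[:, 1]
reg = LinearRegression()
reg.fit(X_demo, y_demo)
print(reg.beta)  # ~[1.0, 2.0, -3.0]: intercept first, then the two slopes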
import subprocess
from inspect import getmembers, isclass
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from thequickmath.field import *
def cell_heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
    Create a heatmap from a numpy array and two lists of labels (copied from the matplotlib website!).
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
# plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
# rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
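# --- Usage sketch (random data, not part of the original source) ---
# Extra keyword arguments such as cmap are forwarded to imshow.
fig, ax = plt.subplots()
im, cbar = cell_heatmap(np.random.rand(4, 5),
                        row_labels=['row %d' % i for i in range(4)],
                        col_labels=['col %d' % j for j in range(5)],
                        ax=ax, cmap='viridis', cbarlabel='value')
fig.savefig('heatmap_demo.png')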
def build_zooming_axes(fig, parent_ax, parent_point, child_box, connecting_vertices):
po_ax = plt.axes(child_box)
po_ax.get_xaxis().set_visible(False)
po_ax.get_yaxis().set_visible(False)
po_ax_bbox = po_ax.get_position()
ax_bbox = parent_ax.get_position()
combined_transform = fig.transFigure + parent_ax.transData.inverted()
data_to_figure = combined_transform.inverted()
parent_point_fig = data_to_figure.transform(parent_point)
vertices_coords = (
(po_ax_bbox.x0, po_ax_bbox.y0),
(po_ax_bbox.x0, po_ax_bbox.y1),
(po_ax_bbox.x1, po_ax_bbox.y1),
(po_ax_bbox.x1, po_ax_bbox.y0),
)
parent_ax.plot([parent_point_fig[0], vertices_coords[connecting_vertices[0]][0]], [parent_point_fig[1], vertices_coords[connecting_vertices[0]][1]], 'k', alpha=0.5, transform=fig.transFigure)
parent_ax.plot([parent_point_fig[0], vertices_coords[connecting_vertices[1]][0]], [parent_point_fig[1], vertices_coords[connecting_vertices[1]][1]], 'k', alpha=0.5, transform=fig.transFigure)
return po_ax
def build_zooming_axes_for_plotting(fig, parent_ax, parent_point, child_box, connecting_vertices):
po_ax = plt.axes(child_box)
po_ax_bbox = po_ax.get_position()
ax_bbox = parent_ax.get_position()
combined_transform = fig.transFigure + parent_ax.transData.inverted()
data_to_figure = combined_transform.inverted()
parent_point_fig = data_to_figure.transform(parent_point)
vertices_coords = (
(po_ax_bbox.x0, po_ax_bbox.y0),
(po_ax_bbox.x0, po_ax_bbox.y1),
(po_ax_bbox.x1, po_ax_bbox.y1),
(po_ax_bbox.x1, po_ax_bbox.y0),
)
parent_ax.plot([parent_point_fig[0], vertices_coords[connecting_vertices[0]][0]], [parent_point_fig[1], vertices_coords[connecting_vertices[0]][1]], 'k', alpha=0.5, transform=fig.transFigure)
parent_ax.plot([parent_point_fig[0], vertices_coords[connecting_vertices[1]][0]], [parent_point_fig[1], vertices_coords[connecting_vertices[1]][1]], 'k', alpha=0.5, transform=fig.transFigure)
return po_ax
def build_zooming_axes_for_plotting_with_box(fig, parent_ax, parent_box, child_box, parent_vertices, child_vertices, remove_axis=False):
combined_transform = fig.transFigure + parent_ax.transData.inverted()
data_to_figure = combined_transform.inverted()
child_box_fig = (
data_to_figure.transform((child_box[0], child_box[1])),
data_to_figure.transform((child_box[0], child_box[1] + child_box[3])),
data_to_figure.transform((child_box[0] + child_box[2], child_box[1] + child_box[3])),
data_to_figure.transform((child_box[0] + child_box[2], child_box[1])),
)
po_ax = plt.axes((child_box_fig[0][0], child_box_fig[0][1], child_box_fig[3][0] - child_box_fig[0][0], child_box_fig[1][1] - child_box_fig[0][1]))
if remove_axis:
po_ax.get_xaxis().set_visible(False)
po_ax.get_yaxis().set_visible(False)
po_ax_bbox = po_ax.get_position()
ax_bbox = parent_ax.get_position()
#parent_point_fig = data_to_figure.transform(parent_point)
parent_vertices_fig = (
data_to_figure.transform((parent_box[0], parent_box[1])),
data_to_figure.transform((parent_box[0], parent_box[1] + parent_box[3])),
data_to_figure.transform((parent_box[0] + parent_box[2], parent_box[1] + parent_box[3])),
data_to_figure.transform((parent_box[0] + parent_box[2], parent_box[1])),
)
child_vertices_fig = (
(po_ax_bbox.x0, po_ax_bbox.y0),
(po_ax_bbox.x0, po_ax_bbox.y1),
(po_ax_bbox.x1, po_ax_bbox.y1),
(po_ax_bbox.x1, po_ax_bbox.y0),
)
parent_ax.plot([parent_box[0], parent_box[0] + parent_box[2]], [parent_box[1], parent_box[1]], 'k',
linewidth=1, alpha=0.5)
parent_ax.plot([parent_box[0], parent_box[0] + parent_box[2]], [parent_box[1] + parent_box[3], parent_box[1] + parent_box[3]], 'k',
linewidth=1, alpha=0.5)
parent_ax.plot([parent_box[0], parent_box[0]], [parent_box[1], parent_box[1] + parent_box[3]], 'k',
linewidth=1, alpha=0.5)
parent_ax.plot([parent_box[0] + parent_box[2], parent_box[0] + parent_box[2]], [parent_box[1], parent_box[1] + parent_box[3]], 'k',
linewidth=1, alpha=0.5)
parent_ax.plot([parent_vertices_fig[parent_vertices[0]][0], child_vertices_fig[child_vertices[0]][0]],
[parent_vertices_fig[parent_vertices[0]][1], child_vertices_fig[child_vertices[0]][1]], 'k',
alpha=0.5, linewidth=1, transform=fig.transFigure)
parent_ax.plot([parent_vertices_fig[parent_vertices[1]][0], child_vertices_fig[child_vertices[1]][0]],
[parent_vertices_fig[parent_vertices[1]][1], child_vertices_fig[child_vertices[1]][1]], 'k',
alpha=0.5, linewidth=1, transform=fig.transFigure)
return po_ax
def put_fields_on_axes(f, ax_zx=None, ax_zy=None, enable_quiver=True, vertical=False):
def _prepare_zx(field_):
#y_averaged_field = average(field_, ['u', 'v', 'w'], 'y')
y_averaged_field = at(field_, 'y', 0.0)
y_averaged_field.change_space_order(['z', 'x'])
return y_averaged_field, filter(filter(filter(y_averaged_field, 'z', 0.5), 'z', 0.5), 'x', 0.5)
def _prepare_zy(field_):
x_averaged_field = average(field_, ['u', 'v', 'w'], 'x')
x_averaged_field.change_space_order(['z', 'y'])
return x_averaged_field, filter(filter(filter(filter(x_averaged_field, 'z', 0.5), 'z', 0.5), 'y', 0.5), 'y', 0.5)
def _plot_contours_and_arrow(ax_, X_cont, Y_cont, cont_field_raw, X_quiv, Y_quiv, quiv_field_X, quiv_field_Y, arrow_scale, enable_quiver):
cvals = 50
cont = ax_.contourf(X_cont, Y_cont, cont_field_raw, cvals, cmap=matplotlib.cm.jet)#, vmin=-0.6, vmax=0.6)
if enable_quiver:
ax_.quiver(X_quiv, Y_quiv, quiv_field_X, quiv_field_Y, # assign to var
color='Teal',
scale=arrow_scale,
headlength=3)
# linewidth=0.05)
return cont
zx_field, zx_field_quiv = _prepare_zx(f)
zy_field, zy_field_quiv = _prepare_zy(f)
# Generate data for plotting
if vertical:
X_zx, Y_zx = np.meshgrid(zx_field.space.elements[1], zx_field.space.elements[0], indexing='ij')
X_zy, Y_zy = np.meshgrid(zy_field.space.elements[1], zy_field.space.elements[0], indexing='ij')
else:
X_zx, Y_zx = np.meshgrid(zx_field.space.elements[0], zx_field.space.elements[1], indexing='ij')
X_zy, Y_zy = np.meshgrid(zy_field.space.elements[0], zy_field.space.elements[1], indexing='ij')
X_zx_quiv, Y_zx_quiv = np.meshgrid(zx_field_quiv.space.elements[0], zx_field_quiv.space.elements[1], indexing='ij')
    X_zy_quiv, Y_zy_quiv = np.meshgrid(zy_field_quiv.space.elements[0], zy_field_quiv.space.elements[1], indexing='ij')
import numpy as np
from scipy import interpolate
from Engine.detect_peaks import detect_peaks
#import matplotlib.pyplot as plt
def A0cont(a0wavecut,a0vcut,night,order,band):
'''
Performs first-pass fit to blaze shape of telluric standard spectrum.
Inputs:
a0wavecut : Wavelength scale of telluric standard spectrum
a0vcut : Corresponding flux of telluric standard spectrum
night : Date of observation in YYYYMMDD
order : Echelle order, as characterized by file index (as opposed to m number; for conversion between the two, see Stahl et al. 2021)
band : H or K band
Outputs:
continuum : Best fit blaze model
'''
# a0vcut is a0fluxlist
x = np.arange(len(a0vcut))
# mpd: detect peaks that are at least separated by minimum peak distance
peaks = detect_peaks(a0vcut, mpd=10)
if band == 'H':
xtimes = 1
else:
xtimes = 3
for ii in range(xtimes):
mask = np.ones(len(peaks), dtype=bool)
f = np.polyfit(x[peaks],a0vcut[peaks],4)
q = np.poly1d(f)
residual = a0vcut[peaks]-q(x[peaks])
MAD = np.median(np.abs(residual-np.median(residual)))
'''
plt.figure(figsize=(20,12))
plt.plot(x,a0vcut,color='black',alpha=.5)
plt.scatter(x[peaks],a0vcut[peaks],s=25,color='blue')
plt.plot(x[peaks],q(x[peaks]),color='red')
plt.plot(x[peaks],q(x[peaks])+3*MAD,color='orange')
plt.plot(x[peaks],q(x[peaks])-5*MAD,color='orange')
plt.savefig(inparam.outpath+'/A0 Contfit_'+str(night)+'_'+str(order)+'_'+str(masterbeam)+'_0')
plt.clf()
plt.close()
'''
        mask[(a0vcut[peaks] / np.nanmedian(a0vcut)) < 0.1] = False  # relative-flux floor (threshold assumed; line truncated in source)
        mask[(residual > 3*MAD) | (residual < -5*MAD)] = False  # clip limits match the commented diagnostic plot above
        peaks = peaks[mask]
    # assumed completion: evaluate the final polynomial over the full pixel grid
    continuum = q(x)
    return continuum
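# --- Standalone sketch of the MAD-clipped polynomial fit used above ---
# (synthetic data, not part of the original source): iteratively reject
# points more than 3 MAD above or 5 MAD below a 4th-order polynomial.
xd = np.linspace(0.0, 1.0, 200)
yd = 1.0 - 4.0 * (xd - 0.5) ** 2 + 0.01 * np.random.randn(200)
yd[50] += 0.5  # inject an outlier
keep = np.ones(len(xd), dtype=bool)
for _ in range(3):
    coef = np.polyfit(xd[keep], yd[keep], 4)
    res = yd[keep] - np.poly1d(coef)(xd[keep])
    mad = np.median(np.abs(res - np.median(res)))
    keep[np.flatnonzero(keep)[(res > 3 * mad) | (res < -5 * mad)]] = False
continuum_demo = np.poly1d(coef)(xd)  # outlier at index 50 is excluded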
'''
MergePlanner.py
Contains methods necessary for advanced selection of which components to merge.
'''
import numpy as np
from collections import defaultdict
from bnpy.util import isEvenlyDivisibleFloat
import bnpy.mergemove.MergeLogger as MergeLogger
# Constant defining how far calculated ELBO gap can be from zero
# and still be considered accepted or favorable
from bnpy.mergemove.MergeMove import ELBO_GAP_ACCEPT_TOL
CountTracker = defaultdict(int)
def preselectPairs(curModel, SS, lapFrac,
mergePairSelection='wholeELBO',
prevScoreMat=None,
mergeScoreRefreshInterval=10,
mergeMaxDegree=5, **kwargs):
''' Create list of candidate pairs for merge
'''
needRefresh = isEvenlyDivisibleFloat(lapFrac, mergeScoreRefreshInterval)
if prevScoreMat is None or needRefresh:
ScoreMat = np.zeros((SS.K, SS.K))
doAllPairs = 1
else:
assert prevScoreMat.shape[0] == SS.K
ScoreMat = prevScoreMat
doAllPairs = 0
ScoreMat = updateScoreMat_wholeELBO(ScoreMat, curModel, SS, doAllPairs)
posMask = ScoreMat > - ELBO_GAP_ACCEPT_TOL
Nvec = SS.getCountVec()
tinyVec = Nvec < 25
tinyMask = np.add(tinyVec, tinyVec[:, np.newaxis])
posAndTiny = np.logical_and(posMask, tinyMask)
posAndBothBig = np.logical_and(posMask, 1 - tinyMask)
# Select list of pairs to track for merge
# prioritizes merges that make big changes
# avoids tracking too many pairs that involves same node
pairsBig = selectPairsUsingAtMostNOfEachComp(posAndBothBig,
N=mergeMaxDegree)
scoresBig = np.asarray([ScoreMat[a, b] for (a, b) in pairsBig])
pairsBig = [pairsBig[x] for x in np.argsort(-1 * scoresBig)]
pairsTiny = selectPairsUsingAtMostNOfEachComp(posAndTiny, pairsBig,
N=mergeMaxDegree,
Nextra=2)
scoresTiny = np.asarray([ScoreMat[a, b] for (a, b) in pairsTiny])
pairsTiny = [pairsTiny[x] for x in np.argsort(-1 * scoresTiny)]
return pairsBig + pairsTiny, ScoreMat
def calcDegreeFromEdgeList(pairIDs, nNode):
''' Calculate degree of each node given edge list
Returns
-------
degree : 1D array, size nNode
degree[k] counts number of edges that node k appears in
'''
degree = np.zeros(nNode, dtype=np.int32)
for n in range(nNode):
degree[n] = np.sum([n in pair for pair in pairIDs])
return degree
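# Usage sketch (toy edge list, not part of the original source): edges
# (0,1) and (1,2) give node 1 degree 2, nodes 0 and 2 degree 1, node 3 degree 0.
print(calcDegreeFromEdgeList([(0, 1), (1, 2)], nNode=4))  # -> [1 2 1 0]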
def selectPairsUsingAtMostNOfEachComp(AdjMat, extraFixedEdges=None,
N=3, Nextra=0):
'''
Args
--------
AdjMat : 2D array, size K x K
N : max degree of each node
Returns
--------
pairIDs : list of tuples, one entry per selected pair
'''
if np.sum(AdjMat) == 0:
return list()
    # AMat :
    # tracks all remaining CANDIDATE edges where both nodes are under the
    # degree limit.
AMat = AdjMat.copy()
xdegree = np.zeros(AdjMat.shape[0], dtype=np.int32)
if extraFixedEdges is not None:
for kA, kB in extraFixedEdges:
xdegree[kA] += 1
xdegree[kB] += 1
    # degree : tracks CANDIDATE edges (including extra) that are not excluded
# newdegree : tracks edges we will KEEP
newdegree = np.zeros_like(xdegree)
newdegree += xdegree
exhaustedMask = newdegree >= N
AMat[exhaustedMask, :] = 0
AMat[:, exhaustedMask] = 0
degree = np.sum(AMat, axis=0) + np.sum(AMat, axis=1) + xdegree
# Traverse comps from largest to smallest degree
pairIDs = list()
nodeOrder = np.argsort(-1 * degree)
for nodeID in nodeOrder:
# Get list of remaining possible partners for node
partners = np.flatnonzero(AMat[nodeID, :] + AMat[:, nodeID])
# Sort node's partners from smallest to largest degree,
# since we want to prioritize keeping small degree partners
partners = partners[np.argsort([degree[p] for p in partners])]
Ncur = N - newdegree[nodeID]
keepPartners = partners[:Ncur]
rejectPartners = partners[Ncur:]
for p in keepPartners:
kA = np.minimum(p, nodeID)
kB = np.maximum(p, nodeID)
pairIDs.append((kA, kB))
AMat[kA, kB] = 0 # make pair ineligible for future partnerships
newdegree[p] += 1
newdegree[nodeID] += 1
for p in rejectPartners:
kA = np.minimum(p, nodeID)
kB = np.maximum(p, nodeID)
AMat[kA, kB] = 0 # make pair ineligible for future partnerships
degree[p] -= 1
degree[nodeID] -= 1
exhaustedMask = newdegree >= N
AMat[exhaustedMask, :] = 0
AMat[:, exhaustedMask] = 0
degree = np.sum(AMat, axis=0) + np.sum(AMat, axis=1) + xdegree
cond1 = np.allclose(degree, xdegree)
cond2 = np.max(newdegree) <= N + Nextra
if not cond1:
print('WARNING: BAD DEGREE CALCULATION')
if not cond2:
print('WARNING: BAD NEWDEGREE CALCULATION')
print('max(newdegree)=%d' % (np.max(newdegree)))
print('N + Nextra: %d' % (N + Nextra))
return pairIDs
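# Usage sketch (toy adjacency matrix, not part of the original source):
# in a star graph where node 0 links to nodes 1-3, a degree cap of N=2
# lets only two of the three edges survive.
A_demo = np.zeros((4, 4))
A_demo[0, 1] = A_demo[0, 2] = A_demo[0, 3] = 1
print(selectPairsUsingAtMostNOfEachComp(A_demo, N=2))  # [(0, 1), (0, 2)]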
def updateScoreMat_wholeELBO(ScoreMat, curModel, SS, doAllPairs=0):
''' Calculate upper-tri matrix of exact ELBO gap for each candidate pair
Returns
---------
    Mraw : 2D array, size K x K. Upper-tri entries carry content.
Mraw[j,k] gives the scalar ELBO gap for the potential merge of j,k
'''
K = SS.K
if doAllPairs:
AGap = curModel.allocModel.calcHardMergeGap_AllPairs(SS)
OGap = curModel.obsModel.calcHardMergeGap_AllPairs(SS)
ScoreMat = AGap + OGap
ScoreMat[np.tril_indices(SS.K)] = -np.inf
for k, uID in enumerate(SS.uIDs):
CountTracker[uID] = SS.getCountVec()[k]
nUpdated = SS.K * (SS.K - 1) / 2
else:
ScoreMat[np.tril_indices(SS.K)] = -np.inf
# Rescore only specific pairs that are positive
redoMask = ScoreMat > -1 * ELBO_GAP_ACCEPT_TOL
for k, uID in enumerate(SS.uIDs):
if CountTracker[uID] == 0:
# Always precompute for brand-new comps
redoMask[k, :] = 1
redoMask[:, k] = 1
else:
absDiff = np.abs(SS.getCountVec()[k] - CountTracker[uID])
percDiff = absDiff / (CountTracker[uID] + 1e-10)
if percDiff > 0.25:
redoMask[k, :] = 1
redoMask[:, k] = 1
CountTracker[uID] = SS.getCountVec()[k]
redoMask[np.tril_indices(SS.K)] = 0
        aList, bList = np.unravel_index(np.flatnonzero(redoMask),
                                        redoMask.shape)
        # (assumed: the truncated remainder rescores just these pairs,
        # mirroring the doAllPairs branch above)
    return ScoreMat
import numpy as np
def orthogonal_procrustes_rotation(canonical_points, observed_points):
'''
canonical_points: (N, 3) array
observed_points: (N, 3) array
'''
M_matrix = np.dot(observed_points.T, canonical_points)
U, S, Vh = np.linalg.svd(M_matrix)
R = np.dot(U, Vh)
if np.linalg.det(R) < 0:
S_mod = np.eye(3)
S_mod[-1, -1] = -1
R = np.dot(np.dot(U, S_mod), Vh)
return R
def orthogonal_procrustes(canonical_points, observed_points):
'''
canonical_points: (N, 3) array
observed_points: (N, 3) array
'''
canonical_points_mean = np.mean(canonical_points, axis=0)
observed_points_mean = np.mean(observed_points, axis=0)
observed_points_ = observed_points - observed_points_mean
canonical_points_ = canonical_points - canonical_points_mean
R = orthogonal_procrustes_rotation(canonical_points_, observed_points_)
t = observed_points_mean - np.dot(R, canonical_points_mean.T)
mean_error = np.mean(np.linalg.norm(observed_points - (np.dot(R, canonical_points.T).T + t), axis=-1))
return R, t, mean_error
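# --- Usage sketch (synthetic rigid transform, not part of the original source) ---
# Recover a known rotation and translation; convention: observed = R @ canonical + t.
rng = np.random.default_rng(1)
pts = rng.normal(size=(50, 3))
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
t_true = np.array([1.0, -2.0, 0.5])
obs = pts @ R_true.T + t_true
R_est, t_est, err = orthogonal_procrustes(pts, obs)
print(np.allclose(R_est, R_true), np.allclose(t_est, t_true), err)  # True True ~0.0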
def batch_orthogonal_procrustes_rotation(canonical_points, observed_points, num_valid):
'''
canonical_points: (B, N, 3) array
observed_points: (B, N, 3) array
num_valid: (B, ) array
'''
M_matrix = np.matmul(np.transpose(observed_points, [0, 2, 1]), canonical_points)
print(M_matrix)
    U, S, Vh = np.linalg.svd(M_matrix)
'''
File contains functions used when training the NNs
'''
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout
from tensorflow.keras.optimizers import Adam
import os
import pathlib
class UniformScaler:
'''
Class for a simple uniform scaler. Linearly transforms X such that all
samples in X are in the range [0,1].
'''
min_val = 0
diff = 1
def fit(self, X):
'''
Fit the parameters of the transformer based on the training data.
Args:
X (array) : The training data. Must have shape (nsamps, nfeatures).
'''
# Check shape of X.
if len(X.shape) != 2:
raise ValueError("X does not have the correct shape. Must have shape (nsamps, nfeatures)")
# Calculate min. value and largest diff. of all samples of X along the
# 0th axis. Both min_val and diff can be vectors if required.
self.min_val = np.min(X, axis=0)
self.diff = np.max(X, axis=0) - np.min(X, axis=0)
def transform(self, X):
'''
Transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the transformed data.
'''
x = np.subtract(X, self.min_val)
return np.true_divide(x, self.diff)
def inverse_transform(self, X):
'''
Inverse transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the inverse transformed data.
'''
x = np.multiply(X, self.diff)
return np.add(x, self.min_val)
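# Usage sketch (random data, not part of the original source): transform and
# inverse_transform round-trip back to the original samples.
scaler = UniformScaler()
X_demo = np.random.rand(100, 3) * 10 - 5
scaler.fit(X_demo)
X_scaled = scaler.transform(X_demo)  # every column now spans [0, 1]
assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)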
class LogScaler:
'''
Class for a log scaler. Linearly transforms logX such that all samples in
logX are in the range [0,1].
'''
min_val = 0
diff = 1
def fit(self, X):
'''
Fit the parameters of the transformer based on the training data.
Args:
X (array) : The training data. Must have shape (nsamps, nfeatures).
'''
# Check shape of X.
if len(X.shape) != 2:
raise ValueError("X does not have the correct shape. Must have shape (nsamps, nfeatures)")
# Make sure there are no negative values or zeros.
if np.any(X<=0.):
raise ValueError("X contains negative values or zeros.")
X = np.log(X)
# Calculate min. value and largest diff. of all samples of X along the
# 0th axis. Both min_val and diff can be vectors if required.
self.min_val = np.min(X, axis=0)
self.diff = np.max(X, axis=0) - np.min(X, axis=0)
def transform(self, X):
'''
Transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the transformed data.
'''
X = np.log(X)
x = np.subtract(X, self.min_val)
return np.true_divide(x, self.diff)
def inverse_transform(self, X):
'''
Inverse transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the inverse transformed data.
'''
x = np.multiply(X, self.diff)
return np.exp(np.add(x, self.min_val))
class StandardScaler:
'''
Replacement for sklearn StandardScaler(). Rescales X such that it has zero
mean and unit variance.
'''
mean = 0
scale = 1
def fit(self, X):
'''
Fit the parameters of the transformer based on the training data.
Args:
X (array) : The training data. Must have shape (nsamps, nfeatures).
'''
# Check shape of X.
if len(X.shape) != 2:
raise ValueError("X does not have the correct shape. Must have shape (nsamps, nfeatures).")
        # Calculate the mean and standard deviation of X along the 0th axis.
# Can be vectors if needed.
self.mean = np.mean(X, axis=0)
self.scale = np.std(X, axis=0)
def transform(self, X):
'''
Transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the transformed data.
'''
x = np.subtract(X, self.mean)
return np.true_divide(x, self.scale)
def inverse_transform(self, X):
'''
Inverse transform the data.
Args:
X (array) : The data to be transformed.
Returns:
Array containing the inverse transformed data.
'''
x = np.multiply(X, self.scale)
return np.add(x, self.mean)
class Resampler:
'''
Class for re-sampling the parameter space covered by a suite of simulations.
    The new samples can then be used to generate training data for the base model
    component emulators.
    .. note::
        See the `Generating training samples for the base model components
        <../example_notebooks/resample_example.ipynb>`_ example.
Args:
simulation_samples (array) : The samples in the parameter space from the
simulation suite. Default is None.
parameter_ranges (array) : Ranges that define the extent of the parameter
space. Should have shape (n, 2), where the first column is the minimum
value for the n parameters, and the second column is the maximum.
Default is None.
        use_latent_space (bool): If True the original simulation samples will be
            transformed into an uncorrelated latent space for re-sampling. Default
            is False.
'''
def __init__(self, simulation_samples=None, parameter_ranges=None,
use_latent_space=False):
# Make sure the user has passed either simulation_samples or parameter_ranges.
if (simulation_samples is None) and (parameter_ranges is None):
raise ValueError("Please provide either simulation samples or parameter ranges.")
elif (parameter_ranges is None) and (use_latent_space is False):
            self.min = np.min(simulation_samples, axis=0)
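# A minimal sketch of the core idea (not the class's actual resampling
# method, which is truncated here): draw new uniform samples inside the
# hyper-rectangle spanned by the simulation suite.
def uniform_resample_sketch(samples, n_new):
    lo = np.min(samples, axis=0)
    hi = np.max(samples, axis=0)
    return np.random.uniform(lo, hi, size=(n_new, samples.shape[1]))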
# -*- coding: UTF-8 -*-
"""
:Script: field_stats.py
:Author: <EMAIL>
:Modified: 2017-07-08
:Purpose: Descriptive statistics for tables using numpy.
:
:References:
: https://github.com/numpy/numpy/blob/master/numpy/lib/nanfunctions.py
: _replace_nan(a, val) - mask = np.isnan(a) - to get the mask
:
: a = [1, 2, np.nan, 3, np.nan, 4]
: _, mask = _replace_nan(a, 0) # for mean
: mask = array([False, False, True, False, True, False], dtype=bool)
:
: ---------------------------------------------------------------------:
"""
# ---- imports, formats, constants ----
import sys
import numpy as np
import arcpy
ft = {'bool': lambda x: repr(x.astype('int32')),
'float': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=10, linewidth=80, precision=2, suppress=True,
threshold=100, formatter=ft)
np.ma.masked_print_option.set_display('-') # change to a single -
script = sys.argv[0] # print this should you need to locate the script
# ---- skewness and kurtosis section -----------------------------------------
def skew_kurt(a, avg, var_x, std_x, col=True, mom='both'):
"""Momental and unbiased skewness
:Emulates the nan functions approach to calculating these parameters
:when data contains nan values.
:Requires:
:---------
: a - an array of float/double values where there are at least 3 non-nan
: numbers in each column. This is not checked since this situation
: should never arise in real world data sets that have been checked.
: moment - both, skew or kurt to return the moments
:Notes:
:------
: a= np.arange(16.).reshape(4,4)
: mask = [0, 5, 10, 15]
: masked_array = np.where(a == mask, np.nan, a)
"""
# a, mask = _replace_nan(a, 0.) # produce a masked of the nan values
if len(a.shape) == 1:
ax = 0
else:
ax = [1, 0][col]
# # ---- mean section ----
mask = np.isnan(a)
cnt = np.sum(~mask, axis=ax, dtype=np.intp, keepdims=False)
diff = a - avg
sqrd = diff * diff
cubed = sqrd * diff
fourP = sqrd * sqrd
x_3 = np.nansum(cubed, axis=ax)
x_4 = np.nansum(fourP, axis=ax)
skew_m = x_3 / (cnt * (std_x**3))
kurt_m = x_4 / (cnt * (var_x * var_x))
# skew_u = skew_m*((cnt**2)/((cnt-1)*(cnt-2))) # could add if needed
if mom == 'skew':
return skew_m
elif mom == 'kurt':
return kurt_m
elif mom == 'both':
return skew_m, kurt_m
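# --- Usage sketch (toy data with a NaN, not part of the original source) ---
# kurt_m is the raw (non-excess) kurtosis, so a large normal sample should
# give skew ~0 and kurtosis ~3 per column despite the missing value.
a_demo = np.random.randn(10000, 2)
a_demo[0, 0] = np.nan
print(skew_kurt(a_demo, np.nanmean(a_demo, axis=0), np.nanvar(a_demo, axis=0),
                np.nanstd(a_demo, axis=0), col=True, mom='both'))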
# -----------------------------------------------------------------------------
# functions
def tweet(msg):
"""Produce a message (msg)for both arcpy and python
"""
m = "{}".format(msg)
arcpy.AddMessage(m)
print(m)
print(arcpy.GetMessages())
def cal_stats(in_fc, col_names):
"""Calculate stats for an array of double types, with nodata (nan, None)
: in the column.
:Requires:
:---------
: in_fc - input featureclass or table
: col_names - the columns... numeric (floating point, double)
:
:Notes:
:------ see the args tuple for examples of nan functions
: np.nansum(b, axis=0) # by column
: np.nansum(b, axis=1) # by row
: c_nan = np.count_nonzero(~np.isnan(b), axis=0) count nan if needed
"""
a = arcpy.da.FeatureClassToNumPyArray(in_fc, col_names) # "*")
    b = a.view(np.float64).reshape(len(a), -1)
if len(a.shape) == 1:
ax = 0
else:
ax = [1, 0][True] # ax = [1, 0][colwise] colwise= True
mask = np.isnan(b)
cnt = np.sum(~mask, axis=ax, dtype=np.intp, keepdims=False)
n_sum = np.nansum(b, axis=0)
n_mean = np.nanmean(b, axis=0)
n_var = np.nanvar(b, axis=0)
n_std = np.nanstd(b, axis=0)
sk, kurt = skew_kurt(b, avg=n_mean, var_x=n_var, std_x=n_std, col=True, mom='both')
args = (col_names, n_sum, np.nanmin(b, axis=0), np.nanmax(b, axis=0),
            np.nanmedian(b, axis=0), n_mean, n_var, n_std, sk, kurt)
    # (assumed completion: the truncated remainder returns the computed stats)
    return args
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from PIL import Image
import pickle
# /////////////// Corruption Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from skimage import transform, feature
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
from PIL import ImageDraw as draw
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
import warnings
import os
from pkg_resources import resource_filename
warnings.simplefilter("ignore", UserWarning)
CORRUPTIONS = ['identity',
'shot_noise',
'impulse_noise',
'glass_blur',
'motion_blur',
'shear',
'scale',
'rotate',
'brightness',
'translate',
'stripe',
'fog',
'spatter',
'dotted_line',
'zigzag',
'canny_edges',]
ALL_CORRUPTIONS = ['identity',
'gaussian_noise',
'shot_noise',
'impulse_noise',
'speckle_noise',
'pessimal_noise',
'gaussian_blur',
'glass_blur',
'defocus_blur',
'motion_blur',
'zoom_blur',
'fog',
'frost',
'snow',
'spatter',
'contrast',
'brightness',
'saturate',
'jpeg_compression',
'pixelate',
'elastic_transform',
'quantize',
'shear',
'rotate',
'scale',
'translate',
'line',
'dotted_line',
'zigzag',
'inverse',
'stripe',
'canny_edges',]
with open("pessimal_noise_matrix", "rb") as f:
pessimal_noise_matrix = pickle.load(f)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=256, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
Return square 2d array, side length 'mapsize', of floats in range 0-255.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
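# Usage sketch (not part of the original source): mapsize must be a power of
# two; the result is a heightmap normalised to [0, 1].
hm = plasma_fractal(mapsize=256, wibbledecay=3)
print(hm.shape, hm.min(), hm.max())  # (256, 256) 0.0 1.0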
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / float(zoom_factor)))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
def line_from_points(c0, r0, c1, r1):
if c1 == c0:
return np.zeros((28, 28))
# Decay function defined as log(1 - d/2) + 1
cc, rr = np.meshgrid(np.linspace(0, 27, 28), np.linspace(0, 27, 28), sparse=True)
m = (r1 - r0) / (c1 - c0)
f = lambda c: m * (c - c0) + r0
dist = np.clip(np.abs(rr - f(cc)), 0, 2.3 - 1e-10)
corruption = np.log(1 - dist / 2.3) + 1
corruption = np.clip(corruption, 0, 1)
    l = int(np.floor(c0))
    r = int(np.ceil(c1))
corruption[:,:l] = 0
corruption[:,r:] = 0
return np.clip(corruption, 0, 1)
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def identity(x):
return np.array(x, dtype=np.float32)
def gaussian_noise(x, severity=5):
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return x.astype(np.float32)
def shot_noise(x, severity=5):
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return x.astype(np.float32)
def impulse_noise(x, severity=4):
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def speckle_noise(x, severity=5):
c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]
x = np.array(x) / 255.
x = np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return x.astype(np.float32)
def pessimal_noise(x, severity=1):
c = 10.63
x = np.array(x) / 255.
noise = np.random.normal(size=196) @ pessimal_noise_matrix
scaled_noise = noise / np.linalg.norm(noise) * c / 4
tiled_noise = np.tile(scaled_noise.reshape(14, 14), (2, 2))
x = np.clip(x + tiled_noise, 0, 1) * 255
return x.astype(np.float32)
def gaussian_blur(x, severity=2):
c = [1, 2, 3, 4, 6][severity - 1]
x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def glass_blur(x, severity=1):
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]
x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for i in range(c[2]):
for h in range(28 - c[1], c[1], -1):
for w in range(28 - c[1], c[1], -1):
if np.random.choice([True, False], 1)[0]:
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x = np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
return x.astype(np.float32)
def defocus_blur(x, severity=1):
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
x = cv2.filter2D(x, -1, kernel)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def motion_blur(x, severity=1):
c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]
output = BytesIO()
x.save(output, format='PNG')
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))
x = cv2.imdecode(np.frombuffer(x.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED)
return np.clip(np.array(x), 0, 255).astype(np.float32)
def zoom_blur(x, severity=5):
c = [np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, severity=5):
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
x = x + c[0] * plasma_fractal(wibbledecay=c[1])[:28, :28]
x = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return x.astype(np.float32)
def frost(x, severity=5):
c = [(1, 0.4),
(0.8, 0.6),
(0.7, 0.7),
(0.65, 0.7),
(0.6, 0.75)][severity - 1]
idx = np.random.randint(5)
filename = [resource_filename(__name__, 'frost/frost1.png'),
resource_filename(__name__, 'frost/frost2.png'),
resource_filename(__name__, 'frost/frost3.png'),
resource_filename(__name__, 'frost/frost4.jpg'),
resource_filename(__name__, 'frost/frost5.jpg'),
resource_filename(__name__, 'frost/frost6.jpg')][idx]
frost = cv2.imread(filename, 0)
# randomly crop and convert to rgb
x_start, y_start = np.random.randint(0, frost.shape[0] - 28), np.random.randint(0, frost.shape[1] - 28)
frost = frost[x_start:x_start + 28, y_start:y_start + 28]
x = np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
return x.astype(np.float32)
def snow(x, severity=5):
c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
(0.2, 0.3, 2, 0.5, 12, 4, 0.7),
(0.55, 0.3, 4, 0.9, 12, 8, 0.7),
(0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
(0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
snow_layer = np.random.normal(size=x.shape, loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer, c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
snow_layer = cv2.imdecode(np.frombuffer(snow_layer.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED) / 255.
x = c[6] * x + (1 - c[6]) * np.maximum(x, x * 1.5 + 0.5)
x = np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
return x.astype(np.float32)
def spatter(x, severity=4):
c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
(0.65, 0.3, 3, 0.68, 0.6, 0),
(0.65, 0.3, 2, 0.68, 0.5, 0),
(0.65, 0.3, 1, 0.65, 1.5, 1),
(0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
liquid_layer = np.random.normal(size=x.shape, loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# mud spatter
color = 63 / 255. * np.ones_like(x) * m
x *= (1 - m)
return np.clip(x + color, 0, 1) * 255
def contrast(x, severity=4):
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x = np.clip((x - means) * c + means, 0, 1) * 255
return x.astype(np.float32)
def brightness(x, severity=5):
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = sk.color.gray2rgb(x)
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = sk.color.hsv2rgb(x)
x = sk.color.rgb2gray(x)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def saturate(x, severity=5):
c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]
x = np.array(x) / 255.
x = sk.color.gray2rgb(x)
x = sk.color.rgb2hsv(x)
x = np.clip(x * c[0] + c[1], 0, 1)
x = sk.color.hsv2rgb(x)
x = sk.color.rgb2gray(x)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def jpeg_compression(x, severity=5):
c = [25, 18, 15, 10, 7][severity - 1]
output = BytesIO()
x.save(output, 'JPEG', quality=c)
x = PILImage.open(output)
return np.array(x).astype(np.float32)
def pixelate(x, severity=3):
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
x = x.resize((int(28 * c), int(28 * c)), PILImage.BOX)
x = x.resize((28, 28), PILImage.BOX)
return np.array(x).astype(np.float32)
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, severity=1):
c = [(28 * 2, 28 * 0.7, 28 * 0.1),
(28 * 2, 28 * 0.08, 28 * 0.2),
(28 * 0.05, 28 * 0.01, 28 * 0.02),
(28 * 0.07, 28 * 0.01, 28 * 0.02),
(28 * 0.12, 28 * 0.01, 28 * 0.02)][severity - 1]
image = np.array(image, dtype=np.float32) / 255.
shape = image.shape
# random affine
center_square = np.float32(shape) // 2
square_size = min(shape) // 3
pts1 = np.float32([center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape, borderMode=cv2.BORDER_CONSTANT)
dx = (gaussian(np.random.uniform(-1, 1, size=shape),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dy = (gaussian(np.random.uniform(-1, 1, size=shape),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
return np.clip(map_coordinates(image, indices, order=1, mode='constant').reshape(shape), 0, 1) * 255
def quantize(x, severity=5):
bits = [5, 4, 3, 2, 1][severity-1]
x = np.array(x).astype(np.float32)
x *= (2 ** bits - 1) / 255.
x = x.round()
x *= 255. / (2 ** bits - 1)
return x
def shear(x, severity=2):
c = [0.2, 0.4, 0.6, 0.8, 1.][severity-1]
# Randomly switch directions
bit = np.random.choice([-1, 1], 1)[0]
c *= bit
aff = transform.AffineTransform(shear=c)
# Calculate translation in order to keep image center (13.5, 13.5) fixed
a1, a2 = aff.params[0,:2]
b1, b2 = aff.params[1,:2]
a3 = 13.5 * (1 - a1 - a2)
b3 = 13.5 * (1 - b1 - b2)
aff = transform.AffineTransform(shear=c, translation=[a3, b3])
x = np.array(x) / 255.
x = transform.warp(x, inverse_map=aff)
x = np.clip(x, 0, 1) * 255.
return x.astype(np.float32)
def rotate(x, severity=2):
c = [0.2, 0.4, 0.6, 0.8, 1.][severity-1]
# Randomly switch directions
bit = np.random.choice([-1, 1], 1)[0]
c *= bit
aff = transform.AffineTransform(rotation=c)
a1, a2 = aff.params[0,:2]
b1, b2 = aff.params[1,:2]
a3 = 13.5 * (1 - a1 - a2)
b3 = 13.5 * (1 - b1 - b2)
aff = transform.AffineTransform(rotation=c, translation=[a3, b3])
x = np.array(x) / 255.
x = transform.warp(x, inverse_map=aff)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def scale(x, severity=3):
c = [(1/.9, 1/.9), (1/.8, 1/.8), (1/.7, 1/.7), (1/.6, 1/.6), (1/.5, 1/.5)][severity-1]
aff = transform.AffineTransform(scale=c)
a1, a2 = aff.params[0,:2]
b1, b2 = aff.params[1,:2]
a3 = 13.5 * (1 - a1 - a2)
b3 = 13.5 * (1 - b1 - b2)
aff = transform.AffineTransform(scale=c, translation=[a3, b3])
x = np.array(x) / 255.
x = transform.warp(x, inverse_map=aff)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
def translate(x, severity=3):
c = [1, 2, 3, 4, 5][severity-1]
bit = np.random.choice([-1, 1], 2)
dx = c * bit[0]
dy = c * bit[1]
aff = transform.AffineTransform(translation=[dx, dy])
x = np.array(x) / 255.
x = transform.warp(x, inverse_map=aff)
    x = np.clip(x, 0, 1) * 255
    return x.astype(np.float32)
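# --- Usage sketch (synthetic image, not part of the original source) ---
# The geometric corruptions expect a 28x28 PIL image and return a float32
# array with values in [0, 255].
demo_img = PILImage.fromarray(np.random.randint(0, 256, (28, 28), dtype=np.uint8))
sheared = shear(demo_img, severity=2)
translated = translate(demo_img, severity=3)
print(sheared.shape, translated.dtype)  # (28, 28) float32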
import datajoint as dj
import numpy as np
from . import experiment, ephys, get_schema_name, foraging_analysis
from .model.bandit_model_comparison import BanditModelComparison
schema = dj.schema(get_schema_name('foraging_model'))
@schema
class ModelClass(dj.Lookup):
definition = """
model_class: varchar(32) # e.g. LossCounting, RW1972, Hattori2019
---
desc='': varchar(100)
"""
contents = [
['LossCounting', 'Count the number of losses and switch when the number exceeds a threshold'],
['RW1972', 'Rescorla–Wagner model (single learning rate)'],
['LNP', 'Linear-nonlinear-Poisson (exponential recency-weighted average)'],
['Bari2019', 'Bari et al 2019 (different learning rates for chosen/unchosen)'],
['Hattori2019', 'Hattori et al 2019 (different learning rates for rew/unrew/unchosen)'],
['CANN', "Abstracted from Ulises' continuous attractor neural network"],
['Synaptic', "Abstracted from Ulises' synaptic model"]
]
@schema
class ModelParam(dj.Lookup):
definition = """
model_param: varchar(32) # e.g. learn_rate, epsilon, w_tau1
---
param_notation: varchar(32) # r'$\tau_1$'
"""
contents = [
['loss_count_threshold_mean', r'$\mu_{LC}$'],
['loss_count_threshold_std', r'$\sigma_{LC}$'],
['tau1', r'$\tau_1$'],
['tau2', r'$\tau_2$'],
['w_tau1', r'$w_{\tau_1}$'],
['learn_rate', r'$\alpha$'],
['learn_rate_rew', r'$\alpha_{rew}$'],
['learn_rate_unrew', r'$\alpha_{unr}$'],
['forget_rate', r'$\delta$'],
['softmax_temperature', r'$\sigma$'],
['epsilon', r'$\epsilon$'],
['biasL', r'$b_L$'],
['biasR', r'$b_R$'],
['choice_step_size', r'$\alpha_c$'],
['choice_softmax_temperature', r'$\sigma_c$'],
['tau_cann', r'$\tau_{CANN}$'],
['I0', r'$I_0$'],
['rho', r'$\rho$']
]
@schema
class Model(dj.Manual):
definition = """
model_id: int
---
-> ModelClass
model_notation: varchar(500)
n_params: int # Effective param count
is_bias: bool
is_epsilon_greedy: bool
is_softmax: bool
is_choice_kernel: bool
desc='': varchar(500) # Long name
fit_cmd: blob # Fitting command compatible with the Dynamic-Foraing repo
"""
class Param(dj.Part):
definition = """
-> master
-> ModelParam
---
param_idx: int # To keep params the same order as the original definition in MODELS, hence `fit_result.x`
param_lower_bound: float
param_higher_bound: float
"""
@classmethod
def load_models(cls):
# Original definition from the Dynamic-Foraging repo, using the format: [forager, [para_names], [lower bounds], [higher bounds], desc(optional)]
MODELS = [
# No bias
['LossCounting', ['loss_count_threshold_mean', 'loss_count_threshold_std'],
[0, 0], [40, 10], 'LossCounting: mean, std, no bias'],
['RW1972_epsi', ['learn_rate', 'epsilon'],
[0, 0], [1, 1], 'SuttonBarto: epsilon, no bias'],
['RW1972_softmax', ['learn_rate', 'softmax_temperature'],
[0, 1e-2], [1, 15], 'SuttonBarto: softmax, no bias'],
['LNP_softmax', ['tau1', 'softmax_temperature'],
[1e-3, 1e-2], [100, 15], 'Sugrue2004, Corrado2005: one tau, no bias'],
['LNP_softmax', ['tau1', 'tau2', 'w_tau1', 'softmax_temperature'],
[1e-3, 1e-1, 0, 1e-2], [15, 40, 1, 15], 'Corrado2005, Iigaya2019: two taus, no bias'],
['Bari2019', ['learn_rate', 'forget_rate', 'softmax_temperature'],
[0, 0, 1e-2], [1, 1, 15], 'RL: chosen, unchosen, softmax, no bias'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'softmax_temperature'],
[0, 0, 1e-2], [1, 1, 15], 'RL: rew, unrew, softmax, no bias'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature'],
[0, 0, 0, 1e-2], [1, 1, 1, 15], 'RL: rew, unrew, unchosen, softmax, no bias'],
# With bias
['RW1972_epsi', ['learn_rate', 'epsilon', 'biasL'],
[0, 0, -0.5], [1, 1, 0.5], 'SuttonBarto: epsilon'],
['RW1972_softmax', ['learn_rate', 'softmax_temperature', 'biasL'],
[0, 1e-2, -5], [1, 15, 5], 'SuttonBarto: softmax'],
['LNP_softmax', ['tau1', 'softmax_temperature', 'biasL'],
[1e-3, 1e-2, -5], [100, 15, 5], 'Sugrue2004, Corrado2005: one tau'],
['LNP_softmax', ['tau1', 'tau2', 'w_tau1', 'softmax_temperature', 'biasL'],
[1e-3, 1e-1, 0, 1e-2, -5], [15, 40, 1, 15, 5], 'Corrado2005, Iigaya2019: two taus'],
['Bari2019', ['learn_rate', 'forget_rate', 'softmax_temperature', 'biasL'],
[0, 0, 1e-2, -5], [1, 1, 15, 5], 'RL: chosen, unchosen, softmax'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'softmax_temperature', 'biasL'],
[0, 0, 1e-2, -5], [1, 1, 15, 5], 'RL: rew, unrew, softmax'],
['Hattori2019', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature', 'biasL'],
[0, 0, 0, 1e-2, -5], [1, 1, 1, 15, 5], '(full Hattori) RL: rew, unrew, unchosen, softmax'],
# With bias and choice kernel
['RW1972_softmax_CK', ['learn_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 1e-2, -5, 0, 1e-2], [1, 15, 5, 1, 20], 'SuttonBarto: softmax, choice kernel'],
['LNP_softmax_CK', ['tau1', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[1e-3, 1e-2, -5, 0, 1e-2], [100, 15, 5, 1, 20], 'Sugrue2004, Corrado2005: one tau, choice kernel'],
['LNP_softmax_CK', ['tau1', 'tau2', 'w_tau1', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[1e-3, 1e-1, 0, 1e-2, -5, 0, 1e-2], [15, 40, 1, 15, 5, 1, 20], 'Corrado2005, Iigaya2019: two taus, choice kernel'],
['Bari2019_CK', ['learn_rate', 'forget_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 1e-2, -5, 0, 1e-2], [1, 1, 15, 5, 1, 20], 'RL: chosen, unchosen, softmax, choice kernel'],
['Hattori2019_CK', ['learn_rate_rew', 'learn_rate_unrew', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 1e-2, -5, 0, 1e-2], [1, 1, 15, 5, 1, 20], 'RL: rew, unrew, softmax, choice kernel'],
['Hattori2019_CK', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 0, 1e-2, -5, 0, 1e-2], [1, 1, 1, 15, 5, 1, 20], 'Hattori + choice kernel'],
['Hattori2019_CK', ['learn_rate_rew', 'learn_rate_unrew', 'forget_rate', 'softmax_temperature', 'biasL', 'choice_step_size', 'choice_softmax_temperature'],
[0, 0, 0, 1e-2, -5, 1, 1e-2], [1, 1, 1, 15, 5, 1, 20], 'choice_step_size fixed at 1 --> Bari 2019: only the last choice matters'],
['CANN', ['learn_rate', 'tau_cann', 'softmax_temperature', 'biasL'],
[0, 0, 1e-2, -5], [1, 1000, 15, 5], "Ulises' CANN model, ITI decay, with bias"],
['Synaptic', ['learn_rate', 'forget_rate', 'I0', 'rho', 'softmax_temperature', 'biasL'],
[0, 0, 0, 0, 1e-2, -5], [1, 1, 10, 1, 15, 5], "Ulises' synaptic model"],
['Synaptic', ['learn_rate', 'forget_rate', 'I0', 'rho', 'softmax_temperature', 'biasL'],
[0, 0, 0, -100, 1e-2, -5], [1, 1, 10, 100, 15, 5], "Ulises' synaptic model (unconstrained \\rho)"],
['Synaptic', ['learn_rate', 'forget_rate', 'I0', 'rho', 'softmax_temperature', 'biasL'],
[0, 0, 0, -1e6, 1e-2, -5], [1, 1, 1e6, 1e6, 15, 5], "Ulises' synaptic model (really unconstrained I_0 and \\rho)"],
# ['Synaptic_W>0', ['learn_rate', 'forget_rate', 'I0', 'rho', 'softmax_temperature', 'biasL'],
# [0, 0, 0, -100, 1e-2, -5], [1, 1, 10, 100, 15, 5], "Ulises' synaptic model (W > 0, partially constrained I_0 and \\rho)"],
# ['Synaptic_W>0', ['learn_rate', 'forget_rate', 'I0', 'rho', 'softmax_temperature', 'biasL'],
# [0, 0, 0, -1e6, 1e-2, -5], [1, 1, 10, 1e6, 15, 5], "Ulises' synaptic model (W > 0, unconstrained I_0 and \\rho)"],
]
# Parse and insert MODELS
for model_id, model in enumerate(MODELS):
# Insert Model
model_class = [mc for mc in ModelClass.fetch("model_class") if mc in model[0]][0]
is_bias = True if any(['bias' in param for param in model[1]]) else False
is_epsilon_greedy = True if 'epsilon' in model[1] else False
is_softmax = True if 'softmax_temperature' in model[1] else False
is_choice_kernel = True if 'choice_step_size' in model[1] else False
n_params = 0
param_notation = []
# Insert Model
for param, lb, ub in zip(*model[1:4]):
if lb < ub:
n_params += 1 # Only count effective params
param_notation.append((ModelParam & f'model_param="{param}"').fetch1("param_notation"))
else:
param_notation.append((ModelParam & f'model_param="{param}"').fetch1("param_notation") + f'= {lb}')
param_notation = ', '.join(param_notation)
model_notation = f'{model[0]} ({param_notation})'
desc = model[4] if len(model) == 5 else '' # model[0] + ': ' + ', '.join(model[1]) # Use the user-defined string if exists
Model.insert1(dict(model_id=model_id, model_class=model_class, model_notation=model_notation, n_params=n_params,
is_bias=is_bias, is_epsilon_greedy=is_epsilon_greedy, is_softmax=is_softmax, is_choice_kernel=is_choice_kernel,
desc=desc, fit_cmd=model[:4]),
skip_duplicates=True)
# Insert Model.Param
for idx, (param, lb, ub) in enumerate(zip(*model[1:4])):
# The result table should save both effective and fixed params
Model.Param.insert1(dict(model_id=model_id, model_param=param, param_idx=idx,
param_lower_bound=lb, param_higher_bound=ub),
skip_duplicates=True)
return
@schema
class ModelComparison(dj.Lookup):
# Define model comparison groups
definition = """
model_comparison_idx: smallint
---
desc='': varchar(200)
"""
class Competitor(dj.Part):
definition = """
-> master
competitor_idx: int
---
-> Model
"""
@classmethod
def load(cls):
model_comparisons = [
['all_models', Model],
['models_with_bias', Model & 'is_bias = 1'],
['models_with_bias_and_choice_kernel', Model & 'is_bias = 1' & 'is_choice_kernel']
]
# Parse and insert ModelComparisonGroup
for mc_idx, (desc, models) in enumerate(model_comparisons):
cls.insert1(dict(model_comparison_idx=mc_idx, desc=desc), skip_duplicates=True)
cls.Competitor.insert([
dict(model_comparison_idx=mc_idx, competitor_idx=idx, model_id=model_id)
for idx, model_id in enumerate(models.fetch('model_id', order_by='model_id'))
], skip_duplicates=True)
@schema
class FittedSessionModel(dj.Computed):
definition = """
-> experiment.Session
-> Model
---
n_trials: int
n_params: int # Number of effective params (return from the fitting function; should be the same as Model.n_params)
log_likelihood: float # raw log likelihood of the model
aic: float # AIC
bic: float # BIC
lpt: float # Likelihood-Per-Trial raw
lpt_aic: float # Likelihood-Per-Trial with AIC penalty
lpt_bic: float # Likelihood-Per-Trial with AIC penalty
prediction_accuracy: float # non-cross-validated prediction accuracy
cross_valid_accuracy_fit: float # cross-validated accuracy (fitting set)
cross_valid_accuracy_test: float # cross-validated accuracy (testing set)
cross_valid_accuracy_test_bias_only = NULL: float # accuracy predicted only by bias (testing set)
"""
key_source = ((foraging_analysis.SessionTaskProtocol() & 'session_task_protocol = 100' & 'session_real_foraging'
) * Model()) # * experiment.Session() & 'session_date > "2021-01-01"' # & 'model_id > 21' # & 'subject_id = 482350'
class Param(dj.Part):
definition = """
-> master
-> Model.Param
---
fitted_value: float
"""
class TrialLatentVariable(dj.Part):
"""
To save all fitted latent variables that will be correlated to ephys
Notes:
1. In the original definition (Sutton&Barto book), the updated value after choice of trial t is Q(t+1), not Q(t)!
behavior & ephys: --> ITI(t-1) --> | --> choice (t), reward(t) --> ITI (t) --> |
model: Q(t) --> choice prob(t) --> choice (t), reward(t) | --> Q(t+1) --> choice prob (t+1)
Therefore: the ITI of trial t -> t+1 corresponds to Q(t+1)
2. To make it more intuitive, when they are inserted here, Q is offset by -1,
such that ephys and model are aligned:
behavior & ephys: --> ITI(t-1) --> | --> choice (t), reward(t) --> ITI (t) --> |
model: Q(t-1) --> choice prob(t-1) | --> choice (t), reward(t) --> Q(t) --> choice prob (t) |
This will eliminate the need of adding an offset=-1 whenever ephys and behavioral model are compared.
3. By doing this, I also save the update after the last trial (useless for fitting; useful for ephys) ,
whereas the first trial is discarded, which was randomly initialized anyway
"""
definition = """
-> master
-> experiment.SessionTrial
-> experiment.WaterPort
---
action_value=null: float
choice_prob=null: float
choice_kernel=null: float
"""
def make(self, key):
choice_history, reward_history, iti, p_reward, q_choice_outcome = get_session_history(key)
model_str = (Model & key).fetch('fit_cmd')
# --- Actual fitting ---
if (Model & key).fetch1('model_class') in ['CANN']: # Only pass ITI if this is CANN model (to save some time?)
model_comparison_this = BanditModelComparison(choice_history, reward_history, iti=iti, model=model_str)
else:
model_comparison_this = BanditModelComparison(choice_history, reward_history, iti=None, model=model_str)
model_comparison_this.fit(pool='', plot_predictive=None, if_verbose=False) # Parallel on sessions, not on DE
model_comparison_this.cross_validate(pool='', k_fold=2, if_verbose=False)
# ------ Grab results ----
fit_result = model_comparison_this.results_raw[0]
cross_valid_result = model_comparison_this.prediction_accuracy_CV.iloc[0]
# Insert session fitted stats
self.insert1(dict(**key,
n_trials=fit_result.n_trials,
n_params=fit_result.k_model,
log_likelihood=fit_result.log_likelihood,
aic=fit_result.AIC,
bic=fit_result.BIC,
lpt=fit_result.LPT,
lpt_aic=fit_result.LPT_AIC,
lpt_bic=fit_result.LPT_BIC,
prediction_accuracy=fit_result.prediction_accuracy,
cross_valid_accuracy_fit=np.mean(cross_valid_result.prediction_accuracy_fit),
cross_valid_accuracy_test=np.mean(cross_valid_result.prediction_accuracy_test),
cross_valid_accuracy_test_bias_only=np.mean(cross_valid_result.prediction_accuracy_test_bias_only),
)
)
# Insert fitted params (`order_by` is critical!)
self.Param.insert([dict(**key, model_param=param, fitted_value=x)
for param, x in zip((Model.Param & key).fetch('model_param', order_by='param_idx'), fit_result.x)])
# Insert latent variables (trial number offset -1 here!!)
choice_prob = fit_result.predictive_choice_prob[:, 1:] # Model must have this
action_value = fit_result.action_value[:, 1:] if hasattr(fit_result, 'action_value') else np.full_like(choice_prob, np.nan)
choice_kernel = fit_result.choice_kernel[:, 1:] if hasattr(fit_result, 'choice_kernel') else np.full_like(choice_prob, np.nan)
for water_port_idx, water_port in enumerate(['left', 'right']):
key['water_port'] = water_port
self.TrialLatentVariable.insert(
[{**key, 'trial': i, 'choice_prob': prob, 'action_value': value, 'choice_kernel': ck}
for i, prob, value, ck in zip(q_choice_outcome.fetch('trial', order_by='trial'),
choice_prob[water_port_idx, :],
action_value[water_port_idx, :],
choice_kernel[water_port_idx, :])]
)
@schema
class FittedSessionModelComparison(dj.Computed):
definition = """
-> experiment.Session
-> ModelComparison
"""
key_source = (experiment.Session & FittedSessionModel) * ModelComparison # Only include already-fitted sessions
class RelativeStat(dj.Part):
definition = """
-> master
-> Model
---
relative_likelihood_aic: float
relative_likelihood_bic: float
model_weight_aic: float
model_weight_bic: float
log10_bf_aic: float # log_10 (Bayes factor)
log10_bf_bic: float # log_10 (Bayes factor)
"""
class BestModel(dj.Part):
definition = """
-> master
---
best_aic: int # model_id of the model with the smallest aic
best_bic: int # model_id of the model with the smallest bic
best_cross_validation_test: int # model_id of the model with the highest cross validation test accuracy
"""
def make(self, key):
competing_models = ModelComparison.Competitor & key
results = (FittedSessionModel & key & competing_models).fetch(format='frame').reset_index()
if len(results) < len(competing_models): # not all fitting results of competing models are ready
return
delta_aic = results.aic - np.min(results.aic)
delta_bic = results.bic - np.min(results.bic)
# Relative likelihood = Bayes factor = p_model/p_best = exp( - delta_aic / 2)
results['relative_likelihood_aic'] = np.exp(- delta_aic / 2)
results['relative_likelihood_bic'] = np.exp(- delta_bic / 2)
# Model weight = Relative likelihood / sum(Relative likelihood)
results['model_weight_aic'] = results['relative_likelihood_aic'] / np.sum(results['relative_likelihood_aic'])
results['model_weight_bic'] = results['relative_likelihood_bic'] / np.sum(results['relative_likelihood_bic'])
# log_10 (Bayes factor) = log_10 (exp( - delta_aic / 2)) = (-delta_aic / 2) / log(10)
results['log10_bf_aic'] = - delta_aic / 2 / np.log(10) # Calculate log10(Bayes factor) (relative likelihood)
results['log10_bf_bic'] = - delta_bic / 2 / np.log(10) # Calculate log10(Bayes factor) (relative likelihood)
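# Worked example (hypothetical numbers): aic = [100, 102, 110] gives
# delta_aic = [0, 2, 10], relative likelihoods ~ [1.00, 0.37, 0.007] and
# model weights ~ [0.73, 0.27, 0.005]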
best_aic = results.model_id[np.argmin(results.aic)]
best_bic = results.model_id[np.argmin(results.bic)]
best_cross_validation_test = results.model_id[np.argmax(results.cross_valid_accuracy_test)]
results['model_comparison_idx'] = key['model_comparison_idx']
self.insert1(key)
self.RelativeStat.insert(results, ignore_extra_fields=True, skip_duplicates=True)
self.BestModel.insert1({**key,
'best_aic': best_aic,
'best_bic': best_bic,
'best_cross_validation_test': best_cross_validation_test})
# ============= Helpers =============
def get_session_history(session_key, remove_ignored=True):
# Fetch data
q_choice_outcome = (experiment.WaterPortChoice.proj(choice='water_port')
* experiment.BehaviorTrial.proj('outcome', 'early_lick')
* experiment.SessionBlock.BlockTrial) & session_key
if remove_ignored:
q_choice_outcome &= 'outcome != "ignore"'
# TODO: session QC (warm-up and decreased motivation etc.)
# -- Choice and reward --
# 0: left, 1: right, np.nan: ignored
_choice = q_choice_outcome.fetch('choice', order_by='trial')
_choice[_choice == 'left'] = 0
_choice[_choice == 'right'] = 1
_reward = q_choice_outcome.fetch('outcome', order_by='trial') == 'hit'
reward_history = np.zeros([2, len(_reward)]) # .shape = (2, N trials)
for c in (0, 1):
reward_history[c, _choice == c] = (_reward[_choice == c] == True).astype(int)
if remove_ignored: # For model fitting, turn to integer
choice_history = np.array([_choice]).astype(int)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 13:12:06 2020
@author: peter
"""
import numpy as np
from pathlib import Path
import shutil
import json
import tifffile
import h5py
import quantities as pq
import scipy.interpolate as interp
import scipy.ndimage as ndimage
import scipy.signal as signal
import pandas as pd
import datetime
import pdb
import re
import f.general_functions as gf
import f.ephys_functions as ef
def get_events_exclude_surround_events(
tc,
std,
surround_tc,
surround_std,
z_score=3,
surround_z=7,
exclude_first=0,
max_overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev = detect_events(tc, std, z_score=z_score, exclude_first=exclude_first)
surrounds_ev = detect_events(
tc, std, z_score=surround_z, exclude_first=exclude_first
)
excluded_dict = {}
dict_drop = []
for key in ev.keys():
if type(key) == str:
continue
if key not in surrounds_ev.keys():
continue
sur_e = surrounds_ev[key].T
e = ev[key].T
# if a detected event overlaps a surround event by more than max_overlap, remove it;
# first find all overlapping (event, surround event) pairs
overlapping = np.logical_and(
e[:, 0, None] < sur_e[None, :, 1], e[:, 1, None] >= sur_e[None, :, 0]
)
if not np.any(overlapping):
continue
drop = []
wh = np.where(overlapping)
# now detect size of overlap and delete if proportionally greater than max overlap
for idx in range(len(wh[0])):
overlap = min(e[wh[0][idx], 1], sur_e[wh[1][idx], 1]) - max(
e[wh[0][idx], 0], sur_e[wh[1][idx], 0]
)
if overlap > max_overlap * (e[wh[0][idx], 1] - e[wh[0][idx], 0]):
drop.append(wh[0][idx])
# pdb.set_trace()
exc_e = np.array([x for ii, x in enumerate(e) if ii in drop])
keep_e = np.array([x for ii, x in enumerate(e) if ii not in drop])
excluded_dict[key] = exc_e.T
if len(keep_e) > 0:
ev[key] = keep_e.T
else:
dict_drop.append(key)
# delete empty fields
for key in dict_drop:
del ev[key]
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude dead ROIs
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
# include the surround data
ev["surround_events"] = surrounds_ev
ev["excluded_events"] = excluded_dict
return ev
def get_events_exclude_simultaneous_events(
tc,
std,
z_score=3,
exclude_first=0,
max_events=5,
overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev, excluded_dict = detect_events_remove_simultaneous(
tc,
std,
z_score=z_score,
exclude_first=exclude_first,
max_overlap=overlap,
max_events=max_events,
)
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude dead ROIs
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
ev["excluded_events"] = excluded_dict
ev["surround_events"] = excluded_dict
print("Check this - surrounds and exclude the same")
return ev
def detect_events_remove_simultaneous(
tc, std, z_score=3, exclude_first=0, max_events=5, max_overlap=0.5
):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use closing to join split events and remove small events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
# now count simultaneous events and remove those where they are
num_events = np.sum(events, 0)
excluded_events = num_events > max_events
excluded_time = np.where(excluded_events)[0]
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
excluded_result = {}
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
overlap = np.sum(np.isin(llocs, excluded_time).astype(int)) / len(llocs)
if overlap > max_overlap:
excluded_result[idx] = corr_locs.T
else:
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result, excluded_result
def get_surround_masks(masks, surround_rad=20, dilate=True):
def get_bounding_circle_radius(masks):
rows, cols = np.any(masks, axis=-1), np.any(masks, axis=-2)
rs = np.apply_along_axis(first_last, -1, rows)
cs = np.apply_along_axis(first_last, -1, cols)
centers = np.array(
[rs[:, 0] + (rs[:, 1] - rs[:, 0]) / 2, cs[:, 0] + (cs[:, 1] - cs[:, 0]) / 2]
).T
# bounding radius is the hypotenuse /2
radii = np.sqrt((cs[:, 1] - cs[:, 0]) ** 2 + (rs[:, 1] - rs[:, 0]) ** 2) / 2
return radii, centers
def first_last(arr_1d):
return np.where(arr_1d)[0][[0, -1]]
# avoid border effects/bleedthrough by dilating existing rois
structure = np.ones((3, 3, 3))
structure[0::2, ...] = 0
dilated_masks = ndimage.binary_dilation(masks, structure=structure, iterations=4)
roi_rads, centers = get_bounding_circle_radius(dilated_masks)
x, y = np.indices(masks.shape[-2:])
rs = np.sqrt(
(x[None, ...] - centers[:, 0, None, None]) ** 2
+ (y[None, ...] - centers[:, 1, None, None]) ** 2
)
surround_roi = np.logical_xor(
dilated_masks, rs < roi_rads[:, None, None] + surround_rad
)
return surround_roi
def get_surround_masks_cellfree(masks, surround_rad=50, dilate=True):
all_masks = np.any(masks, axis=0)
# avoid border effects/bleedthrough by dilating existing rois
structure = np.ones((3, 3, 3))
structure[0::2, ...] = 0
dilated_masks = ndimage.binary_dilation(masks, structure=structure, iterations=4)
centers = np.array([ndimage.center_of_mass(m) for m in dilated_masks])
x, y = np.indices(masks.shape[-2:])
rs = np.sqrt(
(x[None, ...] - centers[:, 0, None, None]) ** 2
+ (y[None, ...] - centers[:, 1, None, None]) ** 2
)
surround_roi = np.logical_and(~all_masks, rs < surround_rad)
# see if the area is too small
areas = np.sum(surround_roi, axis=(-2, -1))
# check nowhere too small
small = areas < 2000
if np.any(small):
for new_rs in range(surround_rad, 2 * surround_rad, 10):
small = areas < 2000
surround_roi[small] = np.logical_and(~all_masks, rs[small, ...] < new_rs)
if not np.any(small):
break
small = areas < 2000
# revert back to normal behaviour - just take an area around and dont care about cells
if np.any(small):
surround_roi[small] = np.logical_and(masks[small], rs[small, ...] < new_rs)
return surround_roi
def get_observation_length(event_dict):
tc = event_dict["tc_filt"]
exclude_dict = event_dict["surround_events"]
length = tc.shape[1]
lengths = []
# count as non-observed any time during a surround event
for i in range(tc.shape[0]):
if i in exclude_dict.keys():
lengths.append(
length - np.sum(exclude_dict[i].T[:, 1] - exclude_dict[i].T[:, 0])
)
else:
lengths.append(length)
return np.array(lengths)
def apply_exclusion(exclude_dict, tc):
excluded_tc = np.copy(tc)
for roi in exclude_dict.keys():
for i in range(exclude_dict[roi].shape[-1]):
ids = exclude_dict[roi][:, i]
excluded_tc[roi, ids[0] : ids[1]] = 1
return excluded_tc
def soft_threshold(arr, thresh, to=1):
# Thresholds the array towards the value given by `to`
res = np.copy(arr)
wh = np.where(np.abs(arr - to) < thresh)
n_wh = np.where(np.abs(arr - to) >= thresh)
sgn = np.sign(arr - to)
res[wh] = to
res[n_wh] -= sgn[n_wh] * thresh
return res
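# Worked example (hypothetical values): soft_threshold(np.array([0.9, 1.2]), 0.15)
# returns [1.0, 1.05] - 0.9 lies within 0.15 of the baseline 1 so it snaps to 1,
# while 1.2 is pulled towards 1 by the threshold amount.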
def split_event(t, ids):
# splits a zero-(actually 1) crossing event into multiple non-zero crossing events recursively
# removes one point
if not np.logical_and(
np.any(t[ids[0] : ids[1]] - 1 > 0), np.any(t[ids[0] : ids[1]] - 1 < 0)
):
return [tuple(ids)]
else:
zer_loc = np.argmin(np.abs(t[ids[0] : ids[1]] - 1)) + ids[0]
return split_event(t, (ids[0], zer_loc)) + split_event(t, (zer_loc + 1, ids[1]))
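# Example behaviour (hypothetical trace): if t - 1 changes sign inside
# (ids[0], ids[1]), the interval is split at the sample closest to 1 and each
# half is re-checked, so every returned sub-interval stays entirely above or
# entirely below the baseline of 1.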
def correct_event_signs(t, llocs):
corr_locs = []
for id_idx, ids in enumerate(llocs):
if np.logical_and(
np.any(t[ids[0] : ids[1]] - 1 > 0), np.any(t[ids[0] : ids[1]] - 1 < 0)
):
split_ids = split_event(t, ids)
corr_locs.extend(split_ids)
else:
corr_locs.append(ids)
corr_locs = np.array(corr_locs)
# if we have split into a zero size (due to boundary issue in split events), remove
if np.any((corr_locs[:, 1] - corr_locs[:, 0]) < 1):
corr_locs = corr_locs[(corr_locs[:, 1] - corr_locs[:, 0]) > 0]
return corr_locs
def recursive_split_locs(seq):
# splits a sequence into n adjacent sequences
diff = np.diff(seq)
if not np.any(diff != 1):
return [(seq[0], seq[-1])]
else:
wh = np.where(diff != 1)[0][0] + 1
return recursive_split_locs(seq[:wh]) + recursive_split_locs(seq[wh:])
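# Example (hypothetical values): recursive_split_locs(np.array([2, 3, 4, 9, 10]))
# returns [(2, 4), (9, 10)] - one (start, stop) tuple per contiguous run.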
def detect_events(tc, std, z_score=3, exclude_first=0):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use closing to join split events and remove small events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result
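# Minimal usage sketch (hypothetical shapes): tc and std are (n_rois, n_frames)
# arrays of normalised time courses (baseline ~1) and per-roi noise estimates;
# the returned dict maps each roi index to a (2, n_events) array of event
# (start, stop) frames, plus "tc_filt" and "tc" entries.
# events = detect_events(tc, std, z_score=3, exclude_first=100)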
def get_event_properties(event_dict, use_filt=True):
if use_filt:
t = event_dict["tc_filt"]
else:
t = event_dict["tc"]
result_dict = {}
for idx in event_dict.keys():
if type(idx) == str:
continue
event_properties = []
for locs in event_dict[idx].T:
if np.logical_and(
np.any(t[idx, locs[0] : locs[1]] - 1 > 0),
np.any(t[idx, locs[0] : locs[1]] - 1 < 0),
):
print(idx, locs)
raise ValueError("This shouldnt happen")
event_length = locs[1] - locs[0]
event_amplitude = (
t[idx, np.argmax(np.abs(t[idx, locs[0] : locs[1]] - 1)) + locs[0]] - 1
)
event_integrated = np.sum(t[idx, locs[0] : locs[1]] - 1)
event_properties.append([event_length, event_amplitude, event_integrated])
if len(np.array(event_properties)) == 0:
pdb.set_trace()
result_dict[idx] = np.array(event_properties)
event_dict["event_props"] = result_dict
return event_dict
def lab2masks(seg):
masks = []
for i in range(1, seg.max() + 1):
masks.append((seg == i).astype(int))
return np.array(masks)
def t_course_from_roi(nd_stack, roi):
if len(roi.shape) != 2:
raise NotImplementedError("Only works for 2d ROIs")
wh = np.where(roi)
return np.mean(nd_stack[..., wh[0], wh[1]], -1)
def median_t_course_from_roi(nd_stack, roi):
if len(roi.shape) != 2:
raise NotImplementedError("Only works for 2d ROIs")
wh = np.where(roi)
return np.median(nd_stack[..., wh[0], wh[1]], -1)
def std_t_course_from_roi(nd_stack, roi, standard_err):
"""
Gets the standard deviation of the pixels in the roi at each time point
Parameters
----------
nd_stack : TYPE
DESCRIPTION.
roi : TYPE
DESCRIPTION.
Raises
------
NotImplementedError
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
if len(roi.shape) != 2:
raise NotImplementedError("Only works for 2d ROIs")
wh = np.where(roi)
if standard_err:
fac = 1 / np.sqrt(np.sum(roi))
else:
fac = 1
return fac * np.std(nd_stack[..., wh[0], wh[1]], -1)
def load_tif_metadata(fname):
fname = Path(fname)
metadata_file = Path(fname.parent, Path(fname.stem).stem + "_metadata.txt")
if "rds" in str(Path.home()):
to_file = metadata_file
else:
to_file = Path("/tmp/tmp_metadata.txt")
shutil.copy(
metadata_file, to_file
) # this is to deal with a weird bug due to the NTFS filesystem?
with open(to_file, "r") as f:
metadict = json.load(f)
return metadict
def parse_time(metadata_time):
date = metadata_time.split(" ")[0].split("-")
time = metadata_time.split(" ")[1].split(":")
return int("".join(date)), time
def lin_time(time):
return float(time[0]) * 60**2 + float(time[1]) * 60 + float(time[2])
def get_stack_offset(fname, ephys_start):
date, time = parse_time(load_tif_metadata(fname)["Summary"]["StartTime"])
if int(date) != int(ephys_start[0]):
raise ValueError("Date mismatch!")
ttime = [
str(ephys_start[1])[:2],
str(ephys_start[1])[2:4],
str(ephys_start[1])[4:6],
]
offset = lin_time(time) - lin_time(ttime)
if offset < 0:
raise ValueError("Time mismatch!")
return offset
def slice_cam(cam_frames, n_frames, n_repeats, T):
starts = np.where(np.concatenate(([1], np.diff(cam_frames) > 2 * T)))[0]
# remove any consecutive and take last
starts = starts[np.concatenate((~(np.diff(starts) == 1), [True]))]
sliced_frames = np.zeros((n_repeats, n_frames))
for idx in range(n_repeats):
st = starts[idx]
sliced_frames[idx, ...] = cam_frames[st : st + n_frames]
if np.any(np.diff(sliced_frames, axis=-1) > 2 * T):
raise ValueError("Frames not sliced properly")
return sliced_frames
def slice_ephys(analog_signal, single_cam):
idx0 = ef.time_to_idx(analog_signal, single_cam[0] * pq.s)
idx1 = ef.time_to_idx(analog_signal, single_cam[-1] * pq.s)
return analog_signal[idx0:idx1]
def slice_all_ephys(analog_signal, sliced_cam):
all_ephys = []
sh = len(analog_signal)
for ca in sliced_cam:
ep = slice_ephys(analog_signal, ca)
if len(ep) < sh:
sh = len(ep)
all_ephys.append(ep)
return np.array([np.squeeze(all_ephys[i][:sh]) for i in range(len(all_ephys))])
def get_steps_image_ephys(im_dir, ephys_fname):
ephys_dict = ef.load_ephys_parse(
ephys_fname, analog_names=["vcVm", "vcIm"], event_names=["CamDown"]
)
files = [f for f in Path(im_dir).glob("./**/*.tif")]
offsets = np.array([get_stack_offset(f, ephys_dict["ephys_start"]) for f in files])
offsets, files = gf.sort_zipped_lists([offsets, files])
for idx, f in enumerate(files):
stack = tifffile.imread(f)
if idx == 0:
stacks = np.zeros(((len(files),) + stack.shape), dtype=np.uint16)
stacks[idx, ...] = stack
metadata = load_tif_metadata(files[0])
T = float(metadata["FrameKey-0-0-0"]["HamamatsuHam_DCAM-Exposure"]) * 10**-3
cam = ephys_dict["cam"]
cam = cam[
np.logical_and(
cam > offsets[0] - 10, cam < offsets[-1] + stacks.shape[1] * T + 10
)
]
sliced_cam = slice_cam(cam, stacks.shape[1], stacks.shape[0], T)
ephys_dict["sliced_cam"] = sliced_cam
ephys_dict["cam"] = cam
if np.any(np.diff(sliced_cam[:, 0] - offsets) > stacks.shape[1] * T):
raise ValueError("Problemo!")
# now slice the ephys from the cam
for key in ["vcVm", "ccVm", "ccIm", "vcIm"]:
if key not in ephys_dict.keys():
continue
ephys_dict[key + "_sliced"] = slice_all_ephys(ephys_dict[key], sliced_cam)
idx0 = ef.time_to_idx(ephys_dict[key], offsets[0] - 10)
idx1 = ef.time_to_idx(ephys_dict[key], offsets[-1] + 10)
ephys_dict[key] = ephys_dict[key][idx0:idx1]
return ephys_dict, stacks
def process_ratio_stacks(stacks):
"""
assumes dims = (....,t,y,x)
"""
sh = stacks.shape
stacks = stacks.reshape((-1,) + sh[-3:])
res = np.zeros((stacks.shape[0], 2) + sh[-3:]).astype(float)
for idx, st in enumerate(stacks):
res[idx, ...] = interpolate_stack(st)
return res.reshape(sh[:-3] + (2,) + sh[-3:])
def interpolate_stack(ratio_stack, framelim=1000):
nits = int(np.ceil(ratio_stack.shape[0] / framelim))
full_res = np.zeros((2,) + ratio_stack.shape)
for it in range(nits):
stack = ratio_stack[it * framelim : (it + 1) * framelim, ...]
result = np.zeros((2,) + stack.shape)
y, x = (
np.arange(stack.shape[1], dtype=int),
np.arange(stack.shape[2], dtype=int),
)
z = [
np.arange(0, stack.shape[0], 2, dtype=int),
np.arange(1, stack.shape[0], 2, dtype=int),
]
for i in range(2):
j = np.mod(i + 1, 2)
result[i, i::2, ...] = stack[i::2, ...]
interped = interp.RegularGridInterpolator(
(z[i], y, x), stack[i::2, ...], bounds_error=False, fill_value=None
)
pts = np.indices(stack.shape, dtype=int)[:, j::2, ...].reshape((3, -1))
result[i, j::2, ...] = interped(pts.T).reshape(stack[1::2, ...].shape)
full_res[:, it * framelim : it * framelim + result.shape[1], ...] = result
return full_res
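# The input stack is assumed to interleave two excitation channels frame by
# frame (even frames = channel 0, odd frames = channel 1); each channel is
# resampled onto the other channel's frame times with RegularGridInterpolator,
# so the returned (2,) + ratio_stack.shape array has both channels defined on
# every frame.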
def get_LED_powers(LED, cam, T_approx, cam_edge="falling"):
# assumes LED and cam contain only sliced vals, cam is camDown
if cam_edge != "falling":
raise NotImplementedError("Only implemented for cam falling edge")
# do a rough pass then a second to get LED real value
ids = ef.time_to_idx(
LED,
[
cam[1] + T_approx,
cam[1] + 3 * T_approx,
cam[0] - T_approx,
cam[0],
cam[1] - T_approx,
cam[1],
],
)
zer = LED[ids[0] : ids[1]].magnitude.mean()
l1 = LED[ids[2] : ids[3]].magnitude.mean()
l2 = LED[ids[4] : ids[5]].magnitude.mean()
thr = 0.5 * (zer + min(l1, l2)) + zer
LED_thr = LED > thr
##get actual T
T = (np.sum(LED_thr.astype(int)) / len(cam)) / LED.sampling_rate.magnitude
if np.abs(T - T_approx) > T_approx / 2:
print(T)
print(T_approx)
print("Problems?")
# now get accurate values
ids1 = np.array(
[
ef.time_to_idx(LED, cam[::2] - 3 * T / 4),
ef.time_to_idx(LED, cam[::2] - T / 4),
]
).T
led1 = np.mean([LED[x[0] : x[1]].magnitude.mean() for x in ids1])
ids2 = np.array(
[
ef.time_to_idx(LED, cam[1::2] - 3 * T / 4),
ef.time_to_idx(LED, cam[1::2] - T / 4),
]
).T
led2 = np.mean([LED[x[0] : x[1]].magnitude.mean() for x in ids2])
ids3 = np.array(
[ef.time_to_idx(LED, cam[1:-1:2] + T), ef.time_to_idx(LED, cam[2::2] - 5 * T)]
).T
zer = np.mean([LED[x[0] : x[1]].magnitude.mean() for x in ids3])
led1 -= zer
led2 -= zer
return led1, led2
def cam_check(cam, cam_id, times, e_start, fs):
if cam_id + len(times) > len(cam):
print("length issue")
return False
if len(times) % 2 == 1:
times = times[:-1]
cam_seg = cam[cam_id : cam_id + len(times)]
IFI = np.array([np.diff(cam_seg[::2]), np.diff(cam_seg[1::2])])
# check frame rate consistent
if np.any(np.abs(IFI - 1 / fs) > (1 / fs) / 100):
print("IFI issue")
return False
# compare our segment with if we are off by one each direction - are we at a minimum?
if cam_id + len(times) == len(cam):
if cam_id == 0: # exactly 10000 frames
return True
v = [-1, 0]
elif cam_id == 0:
v = [0, 1]
else:
v = [-1, 0, 1]
var = [
np.std(cam[cam_id + x : cam_id + x + len(times)] + e_start - times) for x in v
]
if var[1] != min(var) and cam_id != 0:
print("Bad times?")
return False
elif var[0] != min(var) and cam_id == 0:
print("Bad times?")
return False
return True
def save_result_hdf(hdf_file, result_dict, group=None):
f = h5py.File(hdf_file, "a")
if group is not None:
group = f'{group}/{to_trial_string(result_dict["tif_file"])}'
else:
group = f'{to_trial_string(result_dict["tif_file"])}'
grp = f.create_group(group)
for key in result_dict.keys():
# compare against the fully qualified class name (a type object would never equal a string)
t = type(result_dict[key]).__module__ + "." + type(result_dict[key]).__name__
if t == "neo.core.analogsignal.AnalogSignal":
print(0)
elif t == "numpy.ndarray":
print(1)
else:
raise NotImplementedError("Implement this")
def get_all_frame_times(metadict):
frames = []
times = []
for k in metadict.keys():
if k == "Summary":
continue
frame = int(k.split("-")[1])
frames.append(frame)
time = (
metadict[k]["UserData"]["TimeReceivedByCore"]["scalar"]
.split(" ")[1]
.split(":")
)
time = float(time[0]) * 60**2 + float(time[1]) * 60 + float(time[2])
times.append(time)
frames, times = gf.sort_zipped_lists([frames, times])
return np.array(frames), np.array(times)
def load_and_slice_long_ratio(
stack_fname, ephys_fname, T_approx=3 * 10**-3, fs=5, washin=False, nofilt=False
):
stack = tifffile.imread(stack_fname)
n_frames = len(stack)
if Path(ephys_fname).is_file():
ephys_dict = ef.load_ephys_parse(
ephys_fname, analog_names=["LED", "vcVm"], event_names=["CamDown"]
)
e_start = [
float(str(ephys_dict["ephys_start"][1])[i * 2 : (i + 1) * 2])
for i in range(3)
]
e_start[-1] += (float(ephys_dict["ephys_start"][2]) / 10) / 1000
e_start = lin_time(e_start)
meta = load_tif_metadata(stack_fname)
frames, times = get_all_frame_times(meta)
cam = ephys_dict["CamDown_times"]
cam_id = np.argmin(np.abs(cam + e_start - times[0]))
import os
import cv2
import h5py
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras import backend as K
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import Input, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Activation, Dropout, Lambda, Dense
from keras.utils import np_utils
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.losses import binary_crossentropy, categorical_crossentropy
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
# Fix the random seed: we always initialize the random number generator to a
# constant value for reproducibility of results.
seed = 7
np.random.seed(seed)
# load data from the path specified by the user
def data_loader(path_train, path_test):
train_list = os.listdir(path_train)
'''
# Map class names to integer labels
train_class_labels = { label: index for index, label in enumerate(class_names) }
'''
# Number of classes in the dataset
num_classes = len(train_list)
# Empty lists for loading training and testing data images as well as corresponding labels
x_train = []
y_train_1 = []
y_train_2 = []
y_train_3 = []
y_train_4 = []
x_test = []
y_test_1 = []
y_test_2 = []
y_test_3 = []
y_test_4 = []
# Loading training data
for elem in train_list:
path1 = path_train + '/' + str(elem)
images = os.listdir(path1)
for elem2 in images:
path2 = path1 + '/' + str(elem2)
# Read the image from the directory
img = cv2.imread(path2)
# Append image to the train data list
x_train.append(img)
# Append class-label corresponding to the image
path_1 = os.path.basename(path1)
label = np.asarray(path_1.split('_'))
y_train_1.append(str(label[0]))
y_train_2.append(str(label[1]))
y_train_3.append(str(label[2]))
y_train_4.append(str(label[3]))
# Loading testing data
path1 = path_test + '/' + str(elem)
images = os.listdir(path1)
for elem2 in images:
path2 = path1 + '/' + str(elem2)
# Read the image from the directory
img = cv2.imread(path2)
# Append image to the test data list
x_test.append(img)
path_2 = os.path.basename(path2)
label = np.asarray(path_2.split('_'))
# Append class-label corresponding to the image
y_test_1.append(str(label[0]))
y_test_2.append(str(label[1]))
y_test_3.append(str(label[2]))
y_test_4.append(str(label[3]))
# Convert lists into numpy arrays
x_train = np.asarray(x_train)
y_train_1 = np.asarray(y_train_1)
y_train_2 = np.asarray(y_train_2)
y_train_3 = np.asarray(y_train_3)
# -*- coding: utf-8 -*-
import sys
sys.path.append('..')
import numpy as np
import vectorflow as vf
male_heights = np.random.normal(171, 6, 500)
female_heights = np.random.normal(158, 5, 500)
male_weights = np.random.normal(70, 10, 500)
female_weights = np.random.normal(57, 8, 500)
male_bfrs = np.random.normal(16, 2, 500)
female_bfrs = np.random.normal(22, 2, 500)
male_labels = [1] * 500
female_labels = [-1] * 500
train_set = np.array([np.concatenate((male_heights, female_heights)),
np.concatenate((male_weights, female_weights)),
np.concatenate((male_bfrs, female_bfrs)),
np.concatenate((male_labels, female_labels))]).T
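# train_set has shape (1000, 4): height, weight and body-fat rate, with the
# +-1 gender label in the last column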
# Shuffle the sample order randomly
np.random.shuffle(train_set)
from typing import Optional, Tuple
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision.transforms import transforms
from src.datamodules.datasets.brian_scans_t1w import BrianScansT1w
from src.utils.utils import calculate_mean
from sklearn.model_selection import train_test_split
from src.utils import utils
import numpy as np
import torch
log = utils.get_logger(__name__)
class MRIDataModule(LightningDataModule):
"""
LightningDataModule for the T1w brain-scan MRI dataset.
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
dataset_dir,
data_dir: str = "data/",
train_val_test_split: Tuple[int, int, int] = (0.7, 0.15, 0.15),
batch_size: int = 64,
num_workers: int = 0,
pin_memory: bool = False,
**kwargs,
):
super().__init__()
self.dataset_dir = dataset_dir
self.data_dir = data_dir
self.train_val_test_split = train_val_test_split
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.labels_counter = None
self.train_transforms = None
self.test_transforms = None
self.data_train: Optional[Dataset] = None
self.data_val: Optional[Dataset] = None
self.data_test: Optional[Dataset] = None
@property
def num_classes(self) -> int:
return 4
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
# BrianScansT1w(dataset_path=self.dataset_dir)
pass
def setup(self, stage: Optional[str] = None):
"""Load data. Set variables: self.data_train, self.data_val, self.data_test."""
dataset = BrianScansT1w(self.dataset_dir)
log.info(f"Calculating mean and std of the dataset")
# mean, std = calculate_mean(dataset, dataset.num_channels)
self.setup_transforms()
dataset = BrianScansT1w(self.dataset_dir, transform=self.test_transforms)
self.labels_counter = dataset.labels_counter
train_dataset_idx, val_dataset_idx = train_test_split(
np.arange(len(dataset.labels)),
train_size=0.6,
shuffle=True,
stratify=dataset.labels,
random_state=1
)
val_dataset_idx, test_dataset_idx = train_test_split(
val_dataset_idx,
train_size=0.5,
shuffle=True,
            stratify=np.array(dataset.labels)[val_dataset_idx],
            random_state=1
        )
## Copyright (c) 2017 <NAME> GmbH
## All rights reserved.
##
## This source code is licensed under the MIT license found in the
## LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import numpy as np
from torch.nn.utils import weight_norm
import pickle
import sys
from termcolor import colored
from modules.hierarchical_embedding import HierarchicalEmbedding
from modules.embeddings import LearnableEmbedding, SineEmbedding
def sqdist(A, B):
return (A**2).sum(dim=2)[:,:,None] + (B**2).sum(dim=2)[:,None,:] - 2 * torch.bmm(A, B.transpose(1,2))
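# sqdist computes batched pairwise squared Euclidean distances via the
# expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b: for A of shape
# (batch, n, d) and B of shape (batch, m, d) it returns a (batch, n, m) matrix.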
class ResidualBlock(nn.Module):
def __init__(self, d_in, d_out, groups=1, dropout=0.0):
super().__init__()
assert d_in % groups == 0, "Input dimension must be a multiple of groups"
assert d_out % groups == 0, "Output dimension must be a multiple of groups"
self.d_in = d_in
self.d_out = d_out
self.proj = nn.Sequential(nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Conv1d(d_out, d_out, kernel_size=1, groups=groups),
nn.Dropout(dropout))
if d_in != d_out:
self.downsample = nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups)
def forward(self, x):
assert x.size(1) == self.d_in, "x dimension does not agree with d_in"
return x + self.proj(x) if self.d_in == self.d_out else self.downsample(x) + self.proj(x)
class GraphLayer(nn.Module):
def __init__(self, d_model, d_inner, n_head, d_head, dropout=0.0, attn_dropout=0.0, wnorm=False, use_quad=False, lev=0):
super().__init__()
self.d_model = d_model
self.d_inner = d_inner
self.n_head = n_head
self.d_head = d_head
self.dropout = nn.Dropout(dropout)
self.attn_dropout = nn.Dropout(attn_dropout)
self.lev = lev
self.use_quad = use_quad
# To produce the query-key-value for the self-attention computation
self.qkv_net = nn.Linear(d_model, 3*d_model)
self.o_net = nn.Linear(n_head*d_head, d_model, bias=False)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.proj1 = nn.Linear(d_model, d_inner)
self.proj2 = nn.Linear(d_inner, d_model)
self.gamma = nn.Parameter(torch.ones(4, 4)) # For different sub-matrices of D
self.sqrtd = np.sqrt(d_head)
#!/usr/bin/python3
# number of output figures = 8
import numpy as np
import matplotlib.patches
import helper.basis
from helper.figure import Figure
import helper.grid
import helper.misc
import helper.plot
def isEquivalent(l, lp, xl):
d = xl.shape[0]
T = [t for t in range(d) if l[t] == lp[t]]
return all([min(l[t], lp[t]) >= xl[t] for t in range(d) if t not in T])
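# l and lp are treated as equivalent with respect to a grid point at level xl
# if, in every dimension where the two level multi-indices differ, both levels
# are at least the point's level in that dimension.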
def plotSGScheme(basis, n, showDiagonal=True, highlightedSubspaces=None,
highlightedPoints=None, whiteMode=False, withBoundary=True,
isModified=False, combinationTechnique=False,
singleGrid=False, equivalenceRelation=False):
subspaceSize = 1
subspaceMargin = 0.2
basisSize = (0.3 if singleGrid else 0.6)
basisMargin = (0.1 if singleGrid else 0.2)
upperLevel = n
if singleGrid:
lowerLevel = n
levelSumDiagonal = 2*n
elif withBoundary:
lowerLevel = 0
levelSumDiagonal = n
else:
lowerLevel = 1
levelSumDiagonal = n+1
numberOfSubspaces = upperLevel - lowerLevel + 1
schemeSize = numberOfSubspaces * (subspaceSize + subspaceMargin) - subspaceMargin
xOffsetGlobal = basisSize + basisMargin
yOffsetGlobal = schemeSize
xSquare = np.array([0, 1, 1, 0, 0])
ySquare = np.array([0, 0, 1, 1, 0])
I = (helper.grid.getNodalIndices if combinationTechnique else
helper.grid.getHierarchicalIndices)
hellhellblau = helper.plot.mixColors("hellblau", 0.5)
fig = Figure.create(figsize=(3, 3), scale=1, facecolor="none",
preamble=r"""
\definecolor{{hellhellblau}}{{rgb}}{{{},{},{}}}
""".format(*hellhellblau))
ax = fig.gca()
stairsCornersInner = []
stairsCornersOuter = []
for l0 in range(lowerLevel, upperLevel + 1):
Il0 = I(l0)
hl0Inv = 2**l0
hl0 = 1 / hl0Inv
Xl0 = [i * hl0 for i in Il0]
for l1 in range(lowerLevel, upperLevel + 1):
Il1 = I(l1)
hl1Inv = 2**l1
hl1 = 1 / hl1Inv
Xl1 = [i * hl1 for i in Il1]
xOffset = (xOffsetGlobal + (l0 - lowerLevel) *
(subspaceSize + subspaceMargin))
yOffset = (yOffsetGlobal - (l1 - lowerLevel) *
(subspaceSize + subspaceMargin) - subspaceSize)
corner = (xOffset + subspaceSize + subspaceMargin / 2,
yOffset - subspaceMargin / 2)
if l0 + l1 == levelSumDiagonal - 1:
stairsCornersInner.append(corner)
elif l0 + l1 == levelSumDiagonal:
stairsCornersOuter.append(corner)
s = lambda x, y: (xOffset + subspaceSize * np.array(x),
yOffset + subspaceSize * np.array(y))
if combinationTechnique:
brightness = 0.4
if equivalenceRelation:
if levelSumDiagonal - 1 <= l0 + l1 <= levelSumDiagonal:
xl = np.array((2, 1))
xi = np.array((1, 1))
allL = ([(i, n-i) for i in range(n+1)] +
[(i, n-i-1) for i in range(n)])
L = [l for l in allL if not ((l[0] >= xl[0]) and (l[1] >= xl[1]))]
equivalenceClasses = helper.misc.getEquivalenceClasses(
L, (lambda l, lp: isEquivalent(l, lp, xl)))
J = [j for j, equivalenceClass in enumerate(equivalenceClasses)
if (l0, l1) in equivalenceClass]
if len(J) == 0:
j = None
contourColor = "anthrazit!20"
faceColor = helper.plot.mixColors("anthrazit", 0.2)
color = "k"
else:
j = J[0]
colorIndices = [2, 5, 7]
contourColor = "C{}".format(colorIndices[j])
faceColor = contourColor
color = [brightness * x
for x in matplotlib.colors.to_rgba(contourColor)[:3]]
else:
contourColor = "anthrazit!20"
faceColor = helper.plot.mixColors("anthrazit", 0.2)
color = 3*[0.6]
else:
if l0 + l1 == levelSumDiagonal:
if singleGrid:
contourColor = "hellhellblau"
faceColor = hellhellblau
color = "mittelblau"
else:
contourColor = "C4"
faceColor = "C4"
color = [brightness * x
for x in matplotlib.colors.to_rgba(contourColor)[:3]]
elif l0 + l1 == levelSumDiagonal - 1:
contourColor = "C1"
faceColor = "C1"
color = [brightness * x
for x in matplotlib.colors.to_rgba(contourColor)[:3]]
else:
contourColor = "anthrazit!20"
faceColor = helper.plot.mixColors("anthrazit", 0.2)
color = 3*[0.6]
rect = matplotlib.patches.Rectangle(
(xOffset, yOffset), subspaceSize, subspaceSize, edgecolor="none",
facecolor=faceColor)
ax.add_patch(rect)
elif (((highlightedSubspaces is not None) and
((l0, l1) in highlightedSubspaces)) or
((highlightedSubspaces is None) and
(highlightedPoints is None) and
(l0 + l1 <= levelSumDiagonal))):
contourColor = "hellhellblau"
color = "mittelblau"
rect = matplotlib.patches.Rectangle(
(xOffset, yOffset), subspaceSize, subspaceSize, edgecolor="none",
facecolor=hellhellblau)
ax.add_patch(rect)
else:
contourColor = "white"
color = "k"
if highlightedPoints is not None:
borderColor = "k"
else:
borderColor = color
Xl = np.array([(x0, x1) for x0 in Xl0 for x1 in Xl1])
if highlightedPoints is None:
K = np.zeros((Xl.shape[0],), dtype=bool)
else:
K = np.any(np.all(highlightedPoints == Xl[:,np.newaxis], axis=2), axis=1)
for k in np.where(K)[0]:
x = Xl[k,:]
rect = matplotlib.patches.Rectangle(
s(max(x[0]-hl0, 0), max(x[1]-hl1, 0)), min(2*hl0, 1), min(2*hl1, 1),
edgecolor="none", facecolor=hellhellblau)
ax.add_patch(rect)
ax.plot(*s(xSquare, ySquare), "-", clip_on=False, color=borderColor)
superscript = (r"\modified" if isModified else "")
subspaceName = ("V" if combinationTechnique else "W")
text = "${}_{{({},{})}}^{{{}}}$".format(
subspaceName, l0, l1, superscript)
y = 0.042
if not (equivalenceRelation and (l0 >= 2) and (l1 == 0)):
text = "\\contour{{{}}}{{{}}}".format(contourColor, text)
y = 0
ax.text(*s(0.5, y), text, color=color, ha="center", va="bottom")
ax.plot(*s(Xl[K,0], Xl[K,1]), ".", clip_on=False,
color="mittelblau")
ax.plot(*s(Xl[np.invert(K),0], Xl[np.invert(K),1]), ".", clip_on=False,
color=color)
if not combinationTechnique:
if l0 > 0:
for x0 in np.linspace(0, 1, hl0Inv // 2 + 1):
ax.plot(*s([x0, x0], [0, 1]), "-", clip_on=False,
color=borderColor)
if l1 > 0:
for x1 in np.linspace(0, 1, hl1Inv // 2 + 1):
ax.plot(*s([0, 1], [x1, x1]), "-", clip_on=False,
color=borderColor)
elif (equivalenceRelation and
(levelSumDiagonal - 1 <= l0 + l1 <= levelSumDiagonal)):
d = 2
x = helper.grid.getCoordinates(xl, xi)
ax.plot(*s(x[0], x[1]), "x", clip_on=False, color=color,
markeredgewidth=2)
if j is not None:
lAst = [l0, l1]
for l2 in equivalenceClasses[j]:
lAst = [(None if lAst[t] is None else
(lAst[t] if l2[t] == lAst[t] else None))
for t in range(d)]
T = [t for t in range(d) if lAst[t] is not None]
if T[0] == 0:
ax.plot(*s([0, 1], [x[1], x[1]]), "-", clip_on=False, color=color)
else:
ax.plot(*s([x[0], x[0]], [0, 1]), "-", clip_on=False, color=color)
if not singleGrid:
stairsCorners = [stairsCornersOuter[0]]
for l in range(n - lowerLevel):
stairsCorners.append(stairsCornersInner[l])
stairsCorners.append(stairsCornersOuter[l+1])
stairsCorners = np.array(stairsCorners)
if showDiagonal:
ax.plot(stairsCorners[:,0], stairsCorners[:,1], "k--", clip_on=False)
maxY = {}
modifiedScale = (0.56 if isModified else 1)
for l in range(lowerLevel, upperLevel + 1):
maxY[l] = 0
for i in I(l):
lb, ub = basis.getSupport(l, i)
xx = np.linspace(lb, ub, 33)
yy = basis.evaluate(l, i, xx)
if (i == 1) or (i == 2**l - 1): yy = [modifiedScale * y for y in yy]
maxY[l] = max(max(yy), maxY[l])
for l0 in range(lowerLevel, upperLevel + 1):
xOffset = xOffsetGlobal + (l0 - lowerLevel) * (subspaceSize + subspaceMargin)
for i0 in I(l0):
color = "C{}".format((i0 * 2**(n - l0)) % 9)
yOffset = yOffsetGlobal + basisMargin
s = lambda x, y: (xOffset + subspaceSize * np.array(x),
yOffset + basisSize * np.array(y) / maxY[l0])
lb, ub = basis.getSupport(l0, i0)
xx = np.linspace(lb, ub, 33)
yy = basis.evaluate(l0, i0, xx)
if (i0 == 1) or (i0 == 2**l0 - 1): yy = [modifiedScale * y for y in yy]
ax.plot(*s(xx, yy), "-", clip_on=False, color=color)
for l1 in range(lowerLevel, upperLevel + 1):
yOffset = (yOffsetGlobal - (l1 - lowerLevel) *
(subspaceSize + subspaceMargin) - subspaceSize)
for i1 in I(l1):
color = "C{}".format((i1 * 2**(n - l1)) % 9)
xOffset = xOffsetGlobal - basisMargin
s = lambda x, y: (xOffset - basisSize * np.array(y) / maxY[l1],
yOffset + subspaceSize * np.array(x))
lb, ub = basis.getSupport(l1, i1)
xx = np.linspace(lb, ub, 200)
yy = basis.evaluate(l1, i1, xx)
if (i1 == 1) or (i1 == 2**l1 - 1): yy = [modifiedScale * y for y in yy]
ax.plot(*s(xx, yy), "-", clip_on=False, color=color)
color = "k"
ax.plot((xOffsetGlobal - basisMargin) * np.ones((2,)),
[0, yOffsetGlobal + basisMargin + 0.2 * basisSize],
"-", c=color, clip_on=False)
ax.plot([xOffsetGlobal - basisMargin - 0.2 * basisSize,
xOffsetGlobal + schemeSize],
(yOffsetGlobal + basisMargin) * np.ones((2,)),
"-", c=color, clip_on=False)
ax.set_xlim([0, xOffsetGlobal + schemeSize])
ax.set_ylim([0, yOffsetGlobal + basisMargin + basisSize])
ax.set_xticks([])
ax.set_yticks([])
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
return fig, ax
def plotGrid(sgType, n=None, includedSubspaces=[], includedPoints=None,
withBoundary=True, distribution="uniform", scale=1.65):
fig = Figure.create(figsize=(1, 1), scale=scale)
ax = fig.gca()
X = np.zeros((0, 2))
I = lambda l: ([0, 1] if l == 0 else list(range(1, 2**l, 2)))
if (sgType == "full") or (sgType == "regular"):
upperLevel = n
if withBoundary:
lowerLevel = 0
levelSumDiagonal = n
else:
lowerLevel = 1
levelSumDiagonal = n + 1
for l0 in range(lowerLevel, upperLevel + 1):
Xl0 = helper.grid.getCoordinates(len(I(l0)) * [l0], I(l0),
distribution=distribution)
for l1 in range(lowerLevel, upperLevel + 1):
Xl1 = helper.grid.getCoordinates(len(I(l1)) * [l1], I(l1),
distribution=distribution)
Xl = np.array([(x0, x1) for x0 in Xl0 for x1 in Xl1])
if (sgType == "full") or ((sgType == "regular") and
(l0 + l1 <= levelSumDiagonal)):
X = np.vstack((X, Xl))
#!/usr/bin/env python
# encoding: utf-8
"""
@author: <NAME> 刘祥德
@license: (C) Copyright 2019-now, Node Supply Chain Manager Corporation Limited.
@contact: <EMAIL>
@software:
@file: warp.py
@time: 10/3/19 4:58 PM
@version 1.0
@desc:
"""
import logging
import os
import random
import cv2
import numpy as np
import torch
import torchvision
from PIL import Image
from skimage.transform import PiecewiseAffineTransform, warp
from config.config import setup_logging, DEBUG
from constant import *
from utils.misc import label_list, AngleFactory, image2label
from utils.transforms import ToUnNormalizedTensor
logger_name = 'warp_logger'
level = logging.INFO
logger = setup_logging('.', logger_name, level)
# CARI_IMG_PATH = '../datasets/Caricature-img'
# FACE_IMG_PATH = '../datasets/CelebA-HQ-img'
# CARI_DATASET_PATH = '../datasets/Caricature-mask'
# FACE_DATASET_PATH = '../datasets/CelebAMaskHQ-mask'
# CARI_DATASET_COLOR_PATH = '../datasets/Caricature-mask-color'
# FACE_DATASET_COLOR_PATH = '../datasets/CelebAMaskHQ-mask-color'
# FACE_WARPED = '../datasets/CelebA-HQ-img-Warped'
face_img_name = '1.png'
cari_img_name = '1'
face_mask_path = os.path.join(FACE_MASK_PATH, face_img_name)
face_path = os.path.join(FACE_IMG_PATH, '1.jpg')
cari_mask_path = os.path.join(CARI_MASK_PATH, cari_img_name + '.png')
cari_path = os.path.join(CARI_IMG_PATH, cari_img_name + '.jpg')
face_mask = cv2.imread(face_mask_path, cv2.IMREAD_GRAYSCALE)
cari_mask = cv2.imread(cari_mask_path, cv2.IMREAD_GRAYSCALE)
# 'skin', 'nose', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'mouth', 'u_lip','l_lip'
# sample_num_list = [50, 50, 50, 50, 50, 50, 50, 50, 50, 50]
sample_num_list = [80, 50, 50, 25, 25, 25, 25, 30, 20, 20]
# sample_num_list = [50, 50, 50, 25, 25, 25, 25, 30, 20, 20]
# sample_num_list = [50, 50, 20, 20, 20, 20, 20, 20, 20, 20]
face = cv2.imread(face_path)
cari = cv2.imread(cari_path)
transforms = [torchvision.transforms.Resize(512), ToUnNormalizedTensor()]
transforms = torchvision.transforms.Compose(transforms)
# face_torch = transforms(Image.open(face_path))
def warp_image(image, src_points=None, dst_points=None, transform=None):
if transform is None:
if src_points is not None and dst_points is not None:
transform = get_transform(image, src_points, dst_points)
else:
raise Exception('Src points and dst points must not be None.')
warped = warp(image, transform, output_shape=image.shape)
return warped, transform
def warp_nearest(image, src_points=None, dst_points=None, transform=None):
if transform is None:
if src_points is not None and dst_points is not None:
transform = get_transform(image, src_points, dst_points)
else:
raise Exception('Src points and dst points must not be None.')
warped = warp(image, transform, output_shape=image.shape, order=0)
return warped, transform
def get_transform(image, src_points, dst_points):
src_points = np.array(
[
[0, 0], [0, image.shape[0]],
[image.shape[0], 0], list(image.shape[:2])
] + src_points.tolist()
)
dst_points = np.array(
[
[0, 0], [0, image.shape[0]],
[image.shape[0], 0], list(image.shape[:2])
] + dst_points.tolist()
)
tform3 = PiecewiseAffineTransform()
tform3.estimate(dst_points, src_points)
return tform3
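# Minimal usage sketch (hypothetical landmark coordinates): move one landmark
# while the corner anchor points added by get_transform keep the border pinned.
# src = np.array([[100, 120]])
# dst = np.array([[110, 130]])
# warped, tform = warp_image(image, src_points=src, dst_points=dst)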
def sample_arrange(src, num, label):
"""
Sample key points by equal spaing
:param src:
:param num:
:return:
"""
arrange = len(src)
# if num > len(src):
# logger.info("Num out of length, return arrange: [{}]".format(src))
# return src
# else:
# output = np.array((1, 2), dtype=arrange.dtype)
output = []
seg = arrange // num
if seg == 0:
msg = '[{}]: The number of sampling points exceeds the number of source points, and the original array is ' \
'equidistantly filled.'.format(label)
logger.info(msg)
return insert_equal_space(src, arrange, num)
seg = arrange / num
for n in range(num):
if int(seg * n) >= len(src):
output.append((src[-1] + src[-2]) // 2)
else:
output.append(src[int(seg * n)])
return output
def insert_equal_space(src, arrange, num):
output = src.copy()
need = num - arrange
sample_space = need // arrange
mod = need % arrange
position = 1
for idx in range(arrange):
# is_enough = False
pre_el = src[idx]
next_el = src[(idx + 1) % arrange]
output = fill(pre_el, next_el, position, sample_space, output)
position += (sample_space + 1)
if len(output) == num:
return output.reshape(-1, 2)
else:
for idx in range(mod):
output = np.append(output, src[-1])
return output.reshape(-1, 2)
def fill(pre_el, next_el, position, sample_space, output):
for j in range(sample_space):
# linearly interpolate between the two neighbouring points
sample = pre_el + (next_el - pre_el) * (j + 1) // (sample_space + 1)
output = np.insert(output, position + j, sample.reshape(2), axis=0)
return output
def is_filtered(points):
return len(points) == 1 and (points == np.array([[-1, -1]])).all()
# Class: XYZFile
# used for getUncertaintyDEM
# by <NAME>, Jul 28 2016
#
# Class: AmpcoroffFile
# manipulating the ampcor outpuf (and translating it into a geotiff)
# by <NAME>, Jul 10 2018
import numpy as np
from carst.libraster import SingleRaster
from scipy.interpolate import griddata
from scipy.stats import gaussian_kde
import pickle
import matplotlib.pyplot as plt
class DuoZArray:
def __init__(self, z1=None, z2=None, ini=None):
self.z1 = z1
self.z2 = z2
self.ini = ini
self.signal_idx = None
def OutlierDetection2D(self, thres_sigma=3.0, plot=True):
x = self.z1
y = self.z2
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
thres_multiplier = np.e ** (thres_sigma ** 2 / 2) # normal dist., +- sigma number
thres = max(z) / thres_multiplier
idx = z >= thres
self.signal_idx = idx
if plot:
pt_style = {'s': 5, 'edgecolor': None}
ax_center = [x[idx].mean(), y[idx].mean()]
ax_halfwidth = max([max(x) - x[idx].mean(),
x[idx].mean() - min(x),
max(y) - y[idx].mean(),
y[idx].mean() - min(y)]) + 1
plt.subplot(121)
plt.scatter(x, y, c=z, **pt_style)
plt.scatter(x[~idx], y[~idx], c='xkcd:red', **pt_style)
plt.axis('scaled')
plt.xlim([ax_center[0] - ax_halfwidth, ax_center[0] + ax_halfwidth])
plt.ylim([ax_center[1] - ax_halfwidth, ax_center[1] + ax_halfwidth])
plt.ylabel('Offset-Y (pixels)')
plt.xlabel('Offset-X (pixels)')
plt.subplot(122)
plt.scatter(x, y, c=z, **pt_style)
plt.scatter(x[~idx], y[~idx], c='xkcd:red', **pt_style)
plt.axis('scaled')
plt.xlim([min(x[idx]) - 1, max(x[idx]) + 1])
plt.ylim([min(y[idx]) - 1, max(y[idx]) + 1])
plt.savefig(self.ini.velocorrection['label_bedrock_histogram'] + '_vx-vs-vy.png', format='png', dpi=200)
plt.clf()
def HistWithOutliers(self, which=None):
if which == 'x':
x = self.z1
pnglabel = '_vx.png'
elif which == 'y':
x = self.z2
pnglabel = '_vy.png'
else:
raise ValueError('Please indicate "x" or "y" for your histogram.')
r_uniq, r_uniq_n = np.unique(x, return_counts=True)
b_uniq, b_uniq_n = np.unique(x[self.signal_idx], return_counts=True)
bar_w = min(np.diff(r_uniq))
lbound = min(x[self.signal_idx]) - np.std(x)
rbound = max(x[self.signal_idx]) + np.std(x)
N_outside_lbound_red = int(sum(x < lbound))
N_outside_rbound_red = int(sum(x > rbound))
plt.bar(r_uniq, r_uniq_n, width=bar_w, color='xkcd:red')
plt.bar(b_uniq, b_uniq_n, width=bar_w, color='xkcd:blue')
plt.xlim([lbound, rbound])
title_str = 'Red points outside (L|R): {}|{}'.format(N_outside_lbound_red, N_outside_rbound_red)
plt.title(title_str)
plt.ylabel('N')
plt.xlabel('offset (pixels)')
plt.savefig(self.ini.velocorrection['label_bedrock_histogram'] + pnglabel, format='png', dpi=200)
plt.clf()
def VeloCorrectionInfo(self):
a = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
b = SingleRaster(self.ini.imagepair['image2'], date=self.ini.imagepair['image2_date'])
datedelta = b.date - a.date
geot = a.GetGeoTransform()
xres = geot[1]
yres = geot[5]
x_culled = self.z1[self.signal_idx]
y_culled = self.z2[self.signal_idx]
self.z1.MAD_median = np.median(x_culled)
self.z1.MAD_std = np.std(x_culled, ddof=1)
self.z1.MAD_mean = np.mean(x_culled)
self.z2.MAD_median = np.median(y_culled)
self.z2.MAD_std = np.std(y_culled, ddof=1)
self.z2.MAD_mean = np.mean(y_culled)
vx_zarray_velo = self.z1[:] * abs(xres) / datedelta.days
vx_zarray_velo.MAD_median = self.z1.MAD_median * abs(xres) / datedelta.days
vx_zarray_velo.MAD_std = self.z1.MAD_std * abs(xres) / datedelta.days
vx_zarray_velo.MAD_mean = self.z1.MAD_mean * abs(xres) / datedelta.days
vy_zarray_velo = self.z2[:] * abs(yres) / datedelta.days
vy_zarray_velo.MAD_median = self.z2.MAD_median * abs(yres) / datedelta.days
vy_zarray_velo.MAD_std = self.z2.MAD_std * abs(yres) / datedelta.days
vy_zarray_velo.MAD_mean = self.z2.MAD_mean * abs(yres) / datedelta.days
with open(self.ini.velocorrection['label_logfile'], 'w') as f:
f.write( 'Total points over bedrock = {:6n}\n'.format(self.z1.size) )
f.write( '-------- Unit: Pixels --------\n')
f.write( 'median_x_px = {:6.3f}\n'.format(float(self.z1.MAD_median)) )
f.write( 'median_y_px = {:6.3f}\n'.format(float(self.z2.MAD_median)) )
f.write( 'std_x_px = {:6.3f}\n'.format(float(self.z1.MAD_std)) )
f.write( 'std_y_px = {:6.3f}\n'.format(float(self.z2.MAD_std)) )
f.write( 'mean_x_px = {:6.3f}\n'.format(float(self.z1.MAD_mean)) )
f.write( 'mean_y_px = {:6.3f}\n'.format(float(self.z2.MAD_mean)) )
f.write( '-------- Unit: Velocity (L/T; most likely m/day) --------\n')
f.write( 'median_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_median)) )
f.write( 'median_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_median)) )
f.write( 'std_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_std)) )
f.write( 'std_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_std)) )
f.write( 'mean_x = {:6.3f}\n'.format(float(vx_zarray_velo.MAD_mean)) )
f.write( 'mean_y = {:6.3f}\n'.format(float(vy_zarray_velo.MAD_mean)) )
return vx_zarray_velo, vy_zarray_velo
class ZArray(np.ndarray):
# A subclass from ndarray, with some new attributes and fancier methods for our purposes
# please see
# https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html
# for more details.
#WARNING: NO NANs SHOULD BE FOUND IN ZArray !!! IT CAN GIVE YOU A BAD RESULT !!!
def __new__(cls, input_array):
# For now input_array should be a 1-d array
# Input array is an already formed ndarray instance
# We need first to cast to be our class type
obj = np.asarray(input_array).view(cls)
obj.MAD_idx = None
obj.MAD_mean = None
obj.MAD_median = None
obj.MAD_std = None
# obj.signal_val = None
# obj.signal_n = None
obj.signal_array = None
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.MAD_idx = getattr(obj, 'MAD_idx', None)
self.MAD_mean = getattr(obj, 'MAD_mean', None)
self.MAD_median = getattr(obj, 'MAD_median', None)
self.MAD_std = getattr(obj, 'MAD_std', None)
# self.signal_val = getattr(obj, 'signal_val', None)
# self.signal_n = getattr(obj, 'signal_n', None)
self.signal_array = getattr(obj, 'signal_array', None)
# =============================================================================================
# ==== The following functions represent new functions developed from =========================
# ==== StatisticOutput and HistWithOutliers. ==================================================
# =============================================================================================
def MADStats(self):
mad = lambda x : 1.482 * np.median(abs(x - np.median(x)))
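# 1.482 ~ 1 / Phi^-1(0.75): this factor makes the median absolute deviation a
# consistent estimator of the standard deviation for normally distributed data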
if self.size <= 3:
print('WARNING: there are too few Z records (<= 3). Aborting the calculation.')
return [], np.nan, np.nan, np.nan
else:
val_median = np.median(self)
val_mad = mad(self)
lbound = val_median - 3. * val_mad
ubound = val_median + 3. * val_mad
idx = np.logical_and(self >= lbound, self <= ubound)
self.MAD_idx = idx
self.MAD_mean = np.mean(self[idx])
self.MAD_median = np.median(self[idx])
self.MAD_std = np.std(self[idx], ddof=1)
def MADHist(self, pngname):
nbins = len(self) // 4 + 1
nbins = 201 if nbins > 201 else nbins
bins = np.linspace(min(self), max(self), nbins)
plt.hist(self, bins=bins, color='xkcd:red')
plt.hist(self[self.MAD_idx], bins=bins, color='xkcd:blue')
plt.ylabel('N')
plt.xlabel('Value (pixel value unit)')
plt.savefig(pngname, format='png')
plt.cla()
# =============================================================================================
# ==== The functions above represent new functions developed from =============================
# ==== StatisticOutput and HistWithOutliers. ==================================================
# =============================================================================================
# =============================================================================================
# ==== The following functions are designed firstly for the functions in the class XYZFile ====
# ==== and later is has modified to a QGIS processing scripts called MAD_outlier_filter.py ====
# ==== now we have copied them back. ==========================================================
# =============================================================================================
# <NAME> on Oct 25, 2018, added the background correction
# the default of mad_multiplier was 3.0
# background correction redesigned on Nov 9, 2018 using more information from the PX
def StatisticOutput(self, plot=True, pngname=None, ini=None):
mad = lambda x : 1.482 * np.median(abs(x - np.median(x)))
if self.size == 0:
print('WARNING: there is no Z records.')
return [], np.nan, np.nan, np.nan
else:
# if ini is not None:
# ref_raster = SingleRaster(ini.imagepair['image1'])
# -> to be continued
mad_multiplier = ini.noiseremoval['peak_detection']
uniq, uniq_n = np.unique(self, return_counts=True)
uniq, uniq_n = fill_with_zero(uniq, uniq_n, ini.pxsettings['oversampling'])
uniq_n_est, _, _ = backcor(uniq, uniq_n, order=ini.noiseremoval['backcor_order'])
background_mad = mad(uniq_n - uniq_n_est) # this is actually the noise level
if background_mad == 0:
background_mad = np.median(abs(uniq_n - uniq_n_est))
print("Use the median of abs(uniq_n - uniq_n_est) as one SNR level since mad = 0")
background_threshold = uniq_n_est + mad_multiplier * background_mad
signal_idx = np.argwhere(uniq_n >= background_threshold)
signal_idx = np.ndarray.flatten(signal_idx)
signal_val = uniq[signal_idx]
# self.signal_val = uniq[signal_idx]
signal_n = uniq_n[signal_idx]
# self.signal_n = uniq_n[signal_idx]
self.signal_array = np.repeat(signal_val, signal_n.astype(int))
self.MAD_mean = self.signal_array.mean()
self.MAD_median = np.median(self.signal_array)
self.MAD_std = self.signal_array.std(ddof=1)
# offset_median = np.median(self.signal_array)
# offset_mad = mad(self.signal_array)
# if offset_mad == 0:
# # the case when over half of the numbers are at the median number,
# # we use the Median absolute deviation around the mean instead of around the median.
# offset_mad = 1.482 * np.median(abs(self.signal_array - np.mean(self.signal_array)))
# lbound = offset_median - mad_multiplier * offset_mad
# ubound = offset_median + mad_multiplier * offset_mad
# self.MAD_idx = np.logical_and(self.signal_array > lbound, self.signal_array < ubound)
# trimmed_numlist = self.signal_array[self.MAD_idx]
# self.MAD_mean = trimmed_numlist.mean()
# self.MAD_median = np.median(trimmed_numlist)
# self.MAD_std = trimmed_numlist.std(ddof=1)
if plot == True and pngname is not None:
self.VerifyBackcor(pngname, uniq, uniq_n, uniq_n_est, background_threshold)
self.HistWithOutliers(pngname)
pickle.dump(self, open(pngname.replace('.png', '.p'), 'wb'))
# return idx2, trimmed_numlist.mean(), np.median(trimmed_numlist), trimmed_numlist.std(ddof=1)
def VerifyBackcor(self, pngname, uniq, uniq_n, uniq_n_est, background_threshold):
import matplotlib.pyplot as plt
pngname = pngname.replace('.png', '-backcor.png')
plt.plot(uniq, uniq_n, label='Histogram', color='xkcd:plum')
plt.plot(uniq, uniq_n_est, label='Background', color='xkcd:lightgreen')
plt.plot(uniq, background_threshold, label='Detection Threshold', color='xkcd:coral')
# plt.xlim([min(uniq), max(uniq)])
plt.ylabel('N')
plt.xlabel('offset (pixels)')
plt.legend(loc='best')
plt.savefig(pngname, format='png', dpi=200)
plt.cla()
def HistWithOutliers(self, pngname, histogram_bound=10):
import matplotlib.pyplot as plt
nbins = len(self) // 4 + 1
nbins = 201 if nbins > 201 else nbins
lbound = min(self) if (min(self) >= -histogram_bound) or (np.mean(self) < -histogram_bound) else -histogram_bound
rbound = max(self) if (max(self) <= histogram_bound) or (np.mean(self) > histogram_bound) else histogram_bound
if lbound >= rbound:
lbound = min(self)
rbound = max(self)
bins = np.linspace(lbound, rbound, nbins)
# trimmed_numlist = self.signal_array[self.MAD_idx]
trimmed_numlist = self.signal_array
N_outside_lbound_red = int(sum(self < lbound))
N_outside_rbound_red = int(sum(self > rbound))
N_outside_lbound_blue = int(sum(trimmed_numlist < lbound))
N_outside_rbound_blue = int(sum(trimmed_numlist > rbound))
title_str = '[Red|Blue] L outside: [{}|{}] R outside: [{}|{}]'.format(N_outside_lbound_red, N_outside_lbound_blue, N_outside_rbound_red, N_outside_rbound_blue)
# plot histograms
plt.hist(self, bins=bins, color=[0.95, 0.25, 0.1])
plt.hist(trimmed_numlist, bins=bins, color=[0.1, 0.25, 0.95])
plt.ylabel('N')
plt.xlabel('offset (pixels)')
plt.title(title_str)
plt.savefig(pngname, format='png', dpi=200)
plt.cla()
# =============================================================================================
# ==== The functions above were designed first for the methods in the class XYZFile ==========
# ==== and were later adapted into a QGIS processing script, MAD_outlier_filter.py; ==========
# ==== now we have copied them back. ==========================================================
# =============================================================================================
class XYZFile:
def __init__(self, fpath=None, refpts_path=None, dem_path=None):
self.fpath = fpath
self.refpts_path = refpts_path
self.dem_path = dem_path
self.data = None
self.diffval = None
self.diffval_trimmed = None
def Read(self):
"""
        self.data will usually be a 3- or 4-column np.array
column 1: easting
column 2: northing
column 3: height of the 1st group (usually reference points)
column 4: height of the 2nd group (usually DEM points made from grdtrack)
"""
self.data = np.loadtxt(self.fpath)
def StatisticOutput(self, pngname):
# for getUncertaintyDEM
mad = lambda x : 1.482 * np.median(abs(x - np.median(x)))
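        # 1.482 ≈ 1/Φ⁻¹(0.75): this factor makes the MAD a consistent
        # estimator of the standard deviation for normally distributed data.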
if self.data.size == 0:
print('NOTE: ' + self.dem_path + ' does not cover any ref points.')
return [self.dem_path, '', '', '', '', '', '', self.refpts_path]
elif self.data.shape[1] == 4:
idx = ~np.isnan(self.data[:, 3])
self.diffval = self.data[idx, 3] - self.data[idx, 2]
offset_median = np.median(self.diffval)
offset_mad = mad(self.diffval)
lbound = offset_median - 3. * offset_mad
ubound = offset_median + 3. * offset_mad
idx2 = np.logical_and(self.diffval > lbound, self.diffval < ubound)
self.diffval_trimmed = self.diffval[idx2]
# The return value is ready for CsvTable.SaveData method.
# ['filename', 'date', 'uncertainty', 'mean_offset_wrt_refpts', \
# 'trimmed_N', 'trimming_lb', 'trimming_up', 'refpts_file']
# 'date' is an empty string since we don't specify any date string in .xyz file.
self.HistWithOutliers(pngname)
return [self.dem_path, '', self.diffval_trimmed.std(ddof=1), self.diffval_trimmed.mean(), \
len(self.diffval_trimmed), lbound, ubound, self.refpts_path]
elif self.data.shape[1] == 3:
print("Not yet designed.")
return []
else:
print("This program currently doesn't support the xyz file whose column number is not 3 or 4.")
return []
def HistWithOutliers(self, pngname):
# for getUncertaintyDEM
import matplotlib.pyplot as plt
nbins = len(self.diffval) // 5
nbins = 200 if nbins > 200 else nbins
bins = np.linspace(min(self.diffval), max(self.diffval), nbins)
plt.hist(self.diffval, bins=bins, color=[0.95, 0.25, 0.1])
plt.hist(self.diffval_trimmed, bins=bins, color=[0.1, 0.25, 0.95])
plt.ylabel('N')
plt.xlabel('offset (pixel value unit)')
plt.savefig(pngname, format='png')
plt.cla()
class AmpcoroffFile:
def __init__(self, fpath=None):
self.fpath = fpath
self.data = None
self.velo_x = None
self.velo_y = None
self.snr = None
self.err_x = None
self.err_y = None
self.ini = None
self.xyv_velo_x = None
self.xyv_velo_y = None
self.xyv_mag = None
self.xyv_snr = None
self.xyv_err_x = None
self.xyv_err_y = None
def Load(self):
"""
        self.data will be an 8-column np.array
        column 1: x cell # (from ul to the East)
        column 2: offset along across (x) direction, in pixels
        column 3: y cell # (from ul to the South)
        column 4: offset along down (y) direction, in pixels
        column 5: SNR
        column 6: Cov 1 (x)
        column 7: Cov 2 (y)
        column 8: Cov 3
"""
import pickle
self.data = pickle.load(open(self.fpath, 'rb'))
# self.data = np.loadtxt(self.fpath)
self.CheckData()
def CheckData(self):
"""
        Check for spurious offset values in the ampcor output.
"""
# 1000 is an arbitrary value
idx = np.argwhere(abs(self.data[:, [1, 3]]) > 1000)
idx = np.unique(idx[:, 0])
self.data = np.delete(self.data, idx, 0)
def SetIni(self, ini):
self.ini = ini
def FillwithNAN(self):
"""
Fill hole with nan value.
"""
x_linenum = np.arange(min(self.data[:, 0]), max(self.data[:, 0]) + self.ini.pxsettings['skip_across'], self.ini.pxsettings['skip_across'])
y_linenum = np.unique(self.data[:, 2])
xx_linenum, yy_linenum = np.meshgrid(x_linenum, y_linenum)
complete_xymap = np.vstack((xx_linenum.flatten(), yy_linenum.flatten())).T
raw_xymap = self.data[:, [0, 2]]
# ---- collapse x & y linenumber to a 1-d array.
#### NOTE THIS WILL GO WRONG IF YOU HAVE A SUPER HUGE ARRAY!
cxy = complete_xymap[:, 0] * 1000000 + complete_xymap[:, 1]
rxy = raw_xymap[:, 0] * 1000000 + raw_xymap[:, 1]
# ----
idx = np.where(np.isin(cxy, rxy))
idx = idx[0]
newdata = np.empty((complete_xymap.shape[0],8))
newdata[:] = np.nan
newdata[:,[0,2]] = complete_xymap
newdata[idx,1] = self.data[:, 1]
newdata[idx,3] = self.data[:, 3]
newdata[idx,4] = self.data[:, 4]
newdata[idx,5] = self.data[:, 5]
newdata[idx,6] = self.data[:, 6]
newdata[idx,7] = self.data[:, 7]
self.data = newdata
def Ampcoroff2Velo(self, ref_raster=None, datedelta=None, velo_or_pixel='velo'):
"""
ref_raster: a SingleRaster object that is used for this pixel tracking
datedelta: a timedelta object that is the time span between two input images
these values will override the settings from self.ini, if self.ini also exists.
the final output is
1. self.velo_x -> the x comp of velocity (m/days) at where Ampcor has processed
2. self.velo_y -> the y comp of velocity (m/days) ...
3. self.snr -> the Signal-to-Noise Ratio ...
4. self.err_x -> the x comp of the error of the velocity (m/days) ....
5. self.err_y -> the y comp of the error of the velocity (m/days) ....
        Each of these is an N-by-3 array whose columns are
        1) projected x coord, 2) projected y coord, and 3) the desired quantity.
"""
if ref_raster is None:
ref_raster = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
if datedelta is None:
a = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
b = SingleRaster(self.ini.imagepair['image2'], date=self.ini.imagepair['image2_date'])
datedelta = b.date - a.date
geot = ref_raster.GetGeoTransform()
ulx = geot[0]
uly = geot[3]
xres = geot[1]
yres = geot[5]
if velo_or_pixel == 'velo':
self.data[:, 0] = ulx + (self.data[:, 0] - 1) * xres
self.data[:, 1] = self.data[:, 1] * abs(xres) / datedelta.days
self.data[:, 2] = uly + (self.data[:, 2] - 1) * yres
self.data[:, 3] = self.data[:, 3] * abs(yres) / datedelta.days
self.data[:, 5] = np.sqrt(self.data[:, 5]) / datedelta.days
self.data[:, 6] = np.sqrt(self.data[:, 6]) / datedelta.days
self.velo_x = self.data[:,[0,2,1]]
self.velo_y = self.data[:,[0,2,3]]
self.velo_y[:, -1] = -self.velo_y[:, -1] # UL-LR system to Cartesian
self.snr = self.data[:,[0,2,4]]
self.err_x = self.data[:,[0,2,5]]
self.err_y = self.data[:,[0,2,6]]
elif velo_or_pixel == 'pixel':
self.data[:, 0] = ulx + (self.data[:, 0] - 1) * xres
self.data[:, 2] = uly + (self.data[:, 2] - 1) * yres
self.velo_x = self.data[:,[0,2,1]]
self.velo_y = self.data[:,[0,2,3]]
self.velo_y[:, -1] = -self.velo_y[:, -1] # UL-LR system to Cartesian
self.snr = self.data[:,[0,2,4]]
self.err_x = self.data[:,[0,2,5]]
self.err_y = self.data[:,[0,2,6]]
def Velo2XYV(self, xyvfileprefix=None, spatialres=None, generate_xyztext=False):
"""
spatialres: the spatial resolution of the XYV file.
xyvfileprefix: the prefix for output xyv file.
the final output is
        self.xyv_... -> after griddata, the data have been resampled onto a grid with a fixed spatial resolution.
"""
if xyvfileprefix is None:
xyvfileprefix = self.ini.rawoutput['label_geotiff']
if spatialres is None:
y_list = np.unique(self.velo_x[:, 1])
spatialres = np.sqrt((self.velo_x[1, 0] - self.velo_x[0, 0]) * (y_list[-1] - y_list[-2]))
x = np.arange(min(self.velo_x[:, 0]), max(self.velo_x[:, 0]), spatialres)
y = np.arange(max(self.velo_x[:, 1]), min(self.velo_x[:, 1]), -spatialres)
xx, yy = np.meshgrid(x, y)
vx = griddata(self.velo_x[:, [0,1]], self.velo_x[:, 2], (xx, yy), method='linear')
vy = griddata(self.velo_y[:, [0,1]], self.velo_y[:, 2], (xx, yy), method='linear')
mag = np.sqrt(vx ** 2 + vy ** 2)
snr = griddata(self.snr[:, [0,1]], self.snr[:, 2], (xx, yy), method='linear')
errx = griddata(self.err_x[:, [0,1]], self.err_x[:, 2], (xx, yy), method='linear')
erry = griddata(self.err_y[:, [0,1]], self.err_y[:, 2], (xx, yy), method='linear')
self.xyv_velo_x = np.stack([xx.flatten(), yy.flatten(), vx.flatten()]).T
self.xyv_velo_y = np.stack([xx.flatten(), yy.flatten(), vy.flatten()]).T
self.xyv_mag = np.stack([xx.flatten(), yy.flatten(), mag.flatten()]).T
self.xyv_snr = np.stack([xx.flatten(), yy.flatten(), snr.flatten()]).T
self.xyv_err_x = np.stack([xx.flatten(), yy.flatten(), errx.flatten()]).T
self.xyv_err_y = np.stack([xx.flatten(), yy.flatten(), erry.flatten()]).T
if generate_xyztext:
np.savetxt(xyvfileprefix + '_vx.xyz', self.xyv_velo_x, delimiter=" ", fmt='%10.2f %10.2f %10.6f')
np.savetxt(xyvfileprefix + '_vy.xyz', self.xyv_velo_y, delimiter=" ", fmt='%10.2f %10.2f %10.6f')
np.savetxt(xyvfileprefix + '_mag.xyz', self.xyv_mag, delimiter=" ", fmt='%10.2f %10.2f %10.6f')
np.savetxt(xyvfileprefix + '_snr.xyz', self.xyv_snr, delimiter=" ", fmt='%10.2f %10.2f %10.6f')
np.savetxt(xyvfileprefix + '_errx.xyz', self.xyv_err_x, delimiter=" ", fmt='%10.2f %10.2f %10.6f')
np.savetxt(xyvfileprefix + '_erry.xyz', self.xyv_err_y, delimiter=" ", fmt='%10.2f %10.2f %10.6f')
def XYV2Raster(self, xyvfileprefix=None, ref_raster=None):
"""
xyvfileprefix: the prefix for output xyv file.
"""
if xyvfileprefix is None:
xyvfileprefix = self.ini.rawoutput['label_geotiff']
if ref_raster is None:
ref_raster = SingleRaster(self.ini.imagepair['image1'], date=self.ini.imagepair['image1_date'])
# vx_xyz = xyvfileprefix + '_vx.xyz'
# vy_xyz = xyvfileprefix + '_vy.xyz'
# mag_xyz = xyvfileprefix + '_mag.xyz'
# vx_gtiff = vx_xyz.replace('xyz', 'tif')
# vy_gtiff = vy_xyz.replace('xyz', 'tif')
# mag_gtiff = mag_xyz.replace('xyz', 'tif')
nodata_val = -9999.0
self.xyv_velo_x[np.isnan(self.xyv_velo_x)] = nodata_val
self.xyv_velo_y[np.isnan(self.xyv_velo_y)] = nodata_val
self.xyv_mag[np.isnan(self.xyv_mag)] = nodata_val
self.xyv_snr[np.isnan(self.xyv_snr)] = nodata_val
self.xyv_err_x[np.isnan(self.xyv_err_x)] = nodata_val
self.xyv_err_y[np.isnan(self.xyv_err_y)] = nodata_val
vx_gtiff = xyvfileprefix + '_vx.tif'
vy_gtiff = xyvfileprefix + '_vy.tif'
mag_gtiff = xyvfileprefix + '_mag.tif'
snr_gtiff = xyvfileprefix + '_snr.tif'
errx_gtiff = xyvfileprefix + '_errx.tif'
erry_gtiff = xyvfileprefix + '_erry.tif'
xraster = SingleRaster(vx_gtiff)
yraster = SingleRaster(vy_gtiff)
magraster = SingleRaster(mag_gtiff)
snrraster = SingleRaster(snr_gtiff)
errxraster = SingleRaster(errx_gtiff)
erryraster = SingleRaster(erry_gtiff)
proj = ref_raster.GetProjection()
# print(self.xyv_velo_x)
# print(proj)
# xraster.XYZ2Raster(vx_xyz, projection=proj)
# yraster.XYZ2Raster(vy_xyz, projection=proj)
# magraster.XYZ2Raster(mag_xyz, projection=proj)
xraster.XYZArray2Raster(self.xyv_velo_x, projection=proj)
yraster.XYZArray2Raster(self.xyv_velo_y, projection=proj)
magraster.XYZArray2Raster(self.xyv_mag, projection=proj)
snrraster.XYZArray2Raster(self.xyv_snr, projection=proj)
errxraster.XYZArray2Raster(self.xyv_err_x, projection=proj)
erryraster.XYZArray2Raster(self.xyv_err_y, projection=proj)
xraster.SetNoDataValue(nodata_val)
yraster.SetNoDataValue(nodata_val)
magraster.SetNoDataValue(nodata_val)
snrraster.SetNoDataValue(nodata_val)
errxraster.SetNoDataValue(nodata_val)
        erryraster.SetNoDataValue(nodata_val)
def points_in_polygon(points_geometry, shp_filename):
# points_geometry: N-by-2 np array defining the geometry of points
# shp_filename: shapefile name
# Both datasets should have the SAME CRS!
# return: np mask array showing where the targeted points are.
import logging
logging.basicConfig(level=logging.WARNING)
import geopandas as gpd
from shapely.geometry import Point
# from shapely.geometry import mapping
shapefile = gpd.read_file(shp_filename)
poly_geometries = [shapefile.loc[i]['geometry'] for i in range(len(shapefile))]
pt_geometries = [Point(xy) for xy in zip(points_geometry[:, 0], points_geometry[:, 1])]
pt_gs = gpd.GeoSeries(pt_geometries)
idx = None
for single_poly in poly_geometries:
if idx is None:
idx = pt_gs.within(single_poly)
else:
tmp = pt_gs.within(single_poly)
idx = np.logical_or(idx, tmp)
return idx
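# Example usage (hypothetical coordinates and shapefile name; both inputs
# must share the same CRS):
#   pts = np.array([[500100.0, 4100050.0],
#                   [501250.0, 4100900.0]])
#   mask = points_in_polygon(pts, 'glacier_outline.shp')
#   inside = pts[mask]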
def fill_with_zero(uniq, uniq_n, ini_oversampling):
"""
Fill the gaps between min and max with zero (counts)
for the uniq and uniq_n.
"""
# ---- Verification of the sub-pixel sampling rate
def_sampling_rate = ini_oversampling * 2 # sampling rate defined in the ini file
real_sampling_rate = 1 / min(np.diff(uniq))
if int(def_sampling_rate) != int(real_sampling_rate):
        raise ValueError('Over-sampling rate of the data does not match the one defined in the ini file!')
# ----
    complete_uniq = np.arange(min(uniq), max(uniq) + min(np.diff(uniq)), min(np.diff(uniq)))
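    # (hedged reconstruction of the truncated tail) counts default to zero and
    # bins present in the input keep theirs; matching is done on integer
    # sub-pixel indices to avoid float comparison issues.
    complete_uniq_n = np.zeros_like(complete_uniq)
    key_complete = np.round(complete_uniq * real_sampling_rate).astype(int)
    key_raw = np.round(uniq * real_sampling_rate).astype(int)
    complete_uniq_n[np.isin(key_complete, key_raw)] = uniq_n
    return complete_uniq, complete_uniq_n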
from __future__ import print_function
"""
Using OpenDNS domain query activity, we retrieve 5 days
of queries/hour to a domain for 240+ domains (stored
in dns.json). We predict the number of queries in
the next hour using a LSTM recurrent neural network.
An ad hoc anomaly detection is outlined in the final
for loop.
Refer to:
http://stackoverflow.com/questions/25967922/pybrain-time-series-prediction-using-lstm-recurrent-nets
https://github.com/pybrain/pybrain/blob/master/examples/supervised/neuralnets%2Bsvm/example_rnn.py
"""
from pybrain.supervised import RPropMinusTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
from pybrain.datasets import SequentialDataSet
from pybrain.tools.xml import NetworkWriter
from sys import stdout
import numpy as np
import random
import json
# Get queries/hr data
with open('dns.json', 'r') as f:
samples = map(lambda x: x['ts'][:-2], json.load(f))
# Shuffle to partition test/train
random.shuffle(samples)
# Set train & test data
train_data, test_data = samples[:50], samples[200:]
# Initialize ds for rnn for 1 obsv and 1 next
ds = SequentialDataSet(1, 1)
# Add each timeseries (ts)
for ts in train_data:
ds.newSequence()
# Add obsv and next
for t_1, t_2 in zip(ts, ts[1:]):
ds.addSample(t_1, t_2)
# RNN with 1-5-1 architecture: 1 input, 5 hidden, 1 output layer
rnn = buildNetwork(1, 5, 1,
hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
# Initialize trainer
trainer = RPropMinusTrainer(rnn, dataset=ds)
# Predefine iterations: epochs & cycles
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
# Training loop
for i in xrange(CYCLES):
trainer.trainEpochs(EPOCHS_PER_CYCLE)
error = trainer.testOnData()
epoch = (i + 1) * EPOCHS_PER_CYCLE
print("\r Epoch: {}/{} Error: {}".format(epoch, EPOCHS, error), end="")
stdout.flush()
# Save model
NetworkWriter.writeToFile(rnn, 'rnn3.xml')
# Ad hoc test
for test in test_data:
for i in xrange(0, len(test) - 6, 5):
        # Get 5 obs; the 6th, which we wish to predict, follows them directly
        obs, nxt = test[i:i + 5], test[i + 5]
# Predict all
prds = map(rnn.activate, obs)
# Get 6th prediction
prd = prds.pop()[0]
# Test if prd is anomalous
        anm = prd > (1 + np.mean(obs))
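        # (hedged completion of the truncated script) report flagged hours
        if anm:
            print("Anomalous hour: predicted {}, observed {}".format(prd, nxt))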
"""
Lean rigid transformation class
Author: Jeff
"""
import logging
import os
import numpy as np
import scipy.linalg
from . import utils
from . import transformations
from .points import BagOfPoints, BagOfVectors, Point, PointCloud, Direction, NormalCloud
from .dual_quaternion import DualQuaternion
try:
from geometry_msgs import msg
except:
logging.warning('Failed to import geometry msgs in rigid_transformations.py.')
try:
import rospy
import rosservice
except ImportError:
logging.warning("Failed to import ros dependencies in rigid_transforms.py")
try:
from autolab_core.srv import *
except ImportError:
logging.warning("autolab_core not installed as catkin package, RigidTransform ros methods will be unavailable")
import subprocess
TF_EXTENSION = '.tf'
STF_EXTENSION = '.stf'
class RigidTransform(object):
"""A Rigid Transformation from one frame to another.
"""
def __init__(self, rotation=np.eye(3), translation=np.zeros(3),
from_frame='unassigned', to_frame='world'):
"""Initialize a RigidTransform.
Parameters
----------
rotation : :obj:`numpy.ndarray` of float
A 3x3 rotation matrix (should be unitary).
translation : :obj:`numpy.ndarray` of float
A 3-entry translation vector.
from_frame : :obj:`str`
A name for the frame of reference on which this transform
operates. This and to_frame are used for checking compositions
of RigidTransforms, which is useful for debugging and catching
errors.
to_frame : :obj:`str`
A name for the frame of reference to which this transform
moves objects.
Raises
------
ValueError
If any of the arguments are invalid. The frames must be strings or
unicode, the translations and rotations must be ndarrays, have the
correct shape, and the determinant of the rotation matrix should be
1.0.
"""
if not isinstance(from_frame, str) and not isinstance(from_frame, unicode):
raise ValueError('Must provide string name of input frame of data')
if not isinstance(to_frame, str) and not isinstance(to_frame, unicode):
raise ValueError('Must provide string name of output frame of data')
self.rotation = rotation
self.translation = translation
self._from_frame = str(from_frame)
self._to_frame = str(to_frame)
def copy(self):
"""Returns a copy of the RigidTransform.
Returns
-------
:obj:`RigidTransform`
A deep copy of the RigidTransform.
"""
return RigidTransform(np.copy(self.rotation), np.copy(self.translation), self.from_frame, self.to_frame)
def _check_valid_rotation(self, rotation):
"""Checks that the given rotation matrix is valid.
"""
if not isinstance(rotation, np.ndarray) or not np.issubdtype(rotation.dtype, np.number):
raise ValueError('Rotation must be specified as numeric numpy array')
if len(rotation.shape) != 2 or rotation.shape[0] != 3 or rotation.shape[1] != 3:
raise ValueError('Rotation must be specified as a 3x3 ndarray')
if np.abs(np.linalg.det(rotation) - 1.0) > 1e-3:
raise ValueError('Illegal rotation. Must have determinant == 1.0')
def _check_valid_translation(self, translation):
"""Checks that the translation vector is valid.
"""
if not isinstance(translation, np.ndarray) or not np.issubdtype(translation.dtype, np.number):
raise ValueError('Translation must be specified as numeric numpy array')
t = translation.squeeze()
if len(t.shape) != 1 or t.shape[0] != 3:
raise ValueError('Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray')
@property
def rotation(self):
""":obj:`numpy.ndarray` of float: A 3x3 rotation matrix.
"""
return self._rotation
@rotation.setter
def rotation(self, rotation):
# Convert quaternions
if len(rotation) == 4:
q = np.array([q for q in rotation])
if np.abs(np.linalg.norm(q) - 1.0) > 1e-3:
raise ValueError('Invalid quaternion. Must be norm 1.0')
rotation = RigidTransform.rotation_from_quaternion(q)
# Convert lists and tuples
if type(rotation) in (list, tuple):
rotation = np.array(rotation).astype(np.float32)
self._check_valid_rotation(rotation)
self._rotation = rotation * 1.
@property
def translation(self):
""":obj:`numpy.ndarray` of float: A 3-ndarray that represents the
transform's translation vector.
"""
return self._translation
@translation.setter
def translation(self, translation):
# Convert lists to translation arrays
if type(translation) in (list, tuple) and len(translation) == 3:
translation = np.array([t for t in translation]).astype(np.float32)
self._check_valid_translation(translation)
self._translation = translation.squeeze() * 1.
@property
def position(self):
""":obj:`numpy.ndarray` of float: A 3-ndarray that represents the
transform's translation vector (same as translation).
"""
return self._translation
@position.setter
def position(self, position):
self.translation = position
@property
def adjoint_tf(self):
A = np.zeros([6,6])
A[:3,:3] = self.rotation
A[3:,:3] = utils.skew(self.translation).dot(self.rotation)
A[3:,3:] = self.rotation
return A
@property
def from_frame(self):
""":obj:`str`: The identifier for the 'from' frame of reference.
"""
return self._from_frame
@from_frame.setter
def from_frame(self, from_frame):
self._from_frame = str(from_frame)
@property
def to_frame(self):
""":obj:`str`: The identifier for the 'to' frame of reference.
"""
return self._to_frame
@to_frame.setter
def to_frame(self, to_frame):
self._to_frame = str(to_frame)
@property
def euler_angles(self):
""":obj:`tuple` of float: The three euler angles for the rotation.
"""
q_wxyz = self.quaternion
q_xyzw = np.roll(q_wxyz, -1)
return transformations.euler_from_quaternion(q_xyzw)
@property
def quaternion(self):
""":obj:`numpy.ndarray` of float: A quaternion vector in wxyz layout.
"""
q_xyzw = transformations.quaternion_from_matrix(self.matrix)
q_wxyz = np.roll(q_xyzw, 1)
return q_wxyz
@property
def dual_quaternion(self):
""":obj:`DualQuaternion`: The DualQuaternion corresponding to this
transform.
"""
qr = self.quaternion
qd = np.append([0], self.translation / 2.)
return DualQuaternion(qr, qd)
@property
def axis_angle(self):
""":obj:`numpy.ndarray` of float: The axis-angle representation for the rotation.
"""
qw, qx, qy, qz = self.quaternion
theta = 2 * np.arccos(qw)
omega = np.array([1,0,0])
if theta > 0:
rx = qx / np.sqrt(1.0 - qw**2)
            ry = qy / np.sqrt(1.0 - qw**2)
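            rz = qz / np.sqrt(1.0 - qw**2)
            omega = np.array([rx, ry, rz])
        # (hedged reconstruction of the truncated tail) scale the unit
        # rotation axis by the rotation angle
        return theta * omega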
import numpy as np
from mayavi import mlab
from BDQuaternions import Conventions
from BDSpace.Coordinates import Cartesian
import BDSpaceVis as Visual
# Create cartesian coordinate system
CS = Cartesian()
convention = Conventions().get_convention('Bunge')
CS.euler_angles_convention = convention
# to visualise the coordinate system basis the module Visual is used
fig = mlab.figure('CS demo', bgcolor=(0.5, 0.5, 0.5))  # Create the mayavi figure
#Visual.draw_coordinate_system_axes(fig, CS)
cube_surface, arrows, labels = Visual.draw_coordinate_system_box(fig, CS, scale=1)
CS.rotate_euler_angles(np.array([np.pi, 0, 0]))
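# (hedged completion of the truncated demo) hand control to the mayavi event loop
mlab.show()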
# <NAME>
# Sarupria Research Group
# Clemson University
# 2019 Jun 11
import sys
import os
import math
import numpy as np
import argparse
import MDAnalysis as mda
# For Cython nlist
# PROVIDE AN ABSOLUTE PATH TO MAKE THIS IMPORT WORK
# FROM WHEREVER YOU ARE EXECUTING THIS CODE
sys.path.append('/home/rdefeve/repos/stride-upload/mda_custom')
import nsgrid.nsgrid_rsd as nsgrid
def main():
# Argument parsing
args = get_args()
# List of all classes
classes = ['liq','fcc','hcp','bcc']
nclass = len(classes)
# Get list of all subdirectories
dirs = [d for d in os.listdir(args.path) if os.path.isdir("%s/%s" % (args.path, d))]
# Narrow directory list to specified classes
# Assumes (T,P) conditions in directories named 'phase_pPPPtTTT'
dirs = [d for d in dirs if d.split("_")[0] in classes]
# Get all files
files_gro = ["%s/prod.gro" % d for d in dirs]
files_xtc = ["%s/prod.xtc" % d for d in dirs]
assert len(files_gro) == len(files_xtc), \
"Error, unequal number of .gro and .xtc files found"
# Initialize lists for samples and labels
samples = []
labels = []
# Initialize values for mean/stdev of nneigh
count = 0
mean = 0
m2 = 0
# Read in data for each file
for fcount in range(len(files_gro)):
# Progress
print("Reading file: %s" % files_gro[fcount])
# Extract classid and create label
classid = files_gro[fcount].split("_")[0]
ndx = classes.index(classid)
label = np.zeros(len(classes))
label[ndx] = 1
# Import topology
u = mda.Universe(files_gro[fcount],
files_xtc[fcount])
# Loop over trajectory
for ts in u.trajectory:
# Select atoms at random for samples
sel = np.random.choice(u.atoms.n_atoms,size=args.n_select,replace=False)
#sel = [13518]
#print(sel)
# Create neighbor list for atoms
nlist = nsgrid.FastNS(args.cutoff*10.0,u.atoms.positions,ts.dimensions).search(u.atoms[sel].positions)
ndxs = nlist.get_indices()
dxs = nlist.get_dx()
dists = nlist.get_distances()
for i in range(len(sel)):
                np_dxs = np.asarray(dxs[i])
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable, MultivariateNormalVariable
from brancher import inference
import brancher.functions as BF
# Probabilistic model #
T = 50
driving_noise = 1.
measure_noise = 0.3
x0 = NormalVariable(0., driving_noise, 'x0')
y0 = NormalVariable(x0, measure_noise, 'y0')
b = BetaVariable(5., 5., 'b')
x = [x0]
y = [y0]
x_names = ["x0"]
y_names = ["y0"]
y_range = range(T)
for t in range(1, T):
x_names.append("x{}".format(t))
x.append(NormalVariable(b * x[t - 1], driving_noise, x_names[t]))
if t in y_range:
y_name = "y{}".format(t)
y_names.append(y_name)
y.append(NormalVariable(x[t], measure_noise, y_name))
AR_model = ProbabilisticModel(x + y)
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
true_b = data[b].data
print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]
# Structured variational distribution #
Qb = BetaVariable(5., 5., "b", learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
Qlambda = [RootVariable(0., 'x0_lambda', learnable=True)]
for t in range(1, T):
Qx_mean.append(RootVariable(0., x_names[t] + "_mean", learnable=True))
Qlambda.append(RootVariable(0., x_names[t] + "_lambda", learnable=True))
Qx.append(NormalVariable(BF.sigmoid(Qlambda[t])*Qb*Qx[t - 1] + (1 - BF.sigmoid(Qlambda[t]))*Qx_mean[t], 1., x_names[t], learnable=True))
variational_posterior = ProbabilisticModel([Qb] + Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
N_iter = 400
n_samples = 10
optimizer = "Adam"
lr = 0.01
inference.perform_inference(AR_model,
number_iterations=N_iter,
number_samples=n_samples,
optimizer=optimizer,
lr=lr)
loss_list = AR_model.diagnostics["loss curve"]
# ELBO
ELBO = AR_model.estimate_log_model_evidence(15000)
print("The ELBO is {}".format(ELBO))
# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
b_posterior_samples = posterior_samples[b].detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
b_sd = np.sqrt(np.var(b_posterior_samples))
x_mean = []
lower_bound = []
upper_bound = []
for xt in x:
x_posterior_samples = posterior_samples[xt].detach().numpy().flatten()
mean = np.mean(x_posterior_samples)
sd = np.sqrt(np.var(x_posterior_samples))
x_mean.append(mean)
lower_bound.append(mean - sd)
upper_bound.append(mean + sd)
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))
# Two subplots, unpack the axes array immediately
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.scatter(y_range, time_series, c="k")
ax1.plot(range(T), x_mean)
ax1.plot(range(T), ground_truth, c="k", ls ="--", lw=1.5)
ax1.fill_between(range(T), lower_bound, upper_bound, alpha=0.5)
ax1.set_title("Time series")
ax2.plot(np.array(loss_list))
ax2.set_title("Convergence")
ax2.set_xlabel("Iteration")
ax3.hist(b_posterior_samples, 25)
ax3.axvline(x=true_b, lw=2, c="r")
ax3.set_title("Posterior samples (b)")
ax3.set_xlim(0, 1)
plt.show()
# Mean-field variational distribution #
Qb = BetaVariable(5., 5., "b", learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
for t in range(1, T):
Qx.append(NormalVariable(0, 1., x_names[t], learnable=True))
variational_posterior = ProbabilisticModel([Qb] + Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=N_iter,
number_samples=n_samples,
optimizer=optimizer,
lr=lr)
loss_list = AR_model.diagnostics["loss curve"]
# ELBO
ELBO = AR_model.estimate_log_model_evidence(15000)
print("The ELBO is {}".format(ELBO))
# Statistics
posterior_samples = AR_model._get_posterior_sample(2000)
b_posterior_samples = posterior_samples[b].detach().numpy().flatten()
b_mean = np.mean(b_posterior_samples)
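# (hedged reconstruction; mirrors the statistics block of the structured
# variational run above)
b_sd = np.sqrt(np.var(b_posterior_samples))
print("The estimated coefficient is: {} +- {}".format(b_mean, b_sd))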
import argparse
import sys
from io import BytesIO
import cv2
# from lightning_logs.basic_model import ImageCompression
from model import ImageCompression
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader, random_split
from torchvision import datasets
from torchvision.transforms import transforms
from dataset import Celeb
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
import pickle
import imgaug.augmenters as iaa
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path')
args = parser.parse_args()
# image = cv2.imread(args.path, cv2.IMREAD_GRAYSCALE)
# model = ImageCompression()
# model.load_state_dict(torch.load('modelLast.pth'))
# model.eval()
# loss = torch.nn.MSELoss()
# transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
# pil = transforms.ToPILImage()
model = ImageCompression.load_from_checkpoint(
'lightning_logs/version_0/epoch=32-step=58937.ckpt')
model.eval()
loss = torch.nn.MSELoss()
pil = transforms.ToPILImage()
totensor = transforms.ToTensor()
def save(img, path):
f = open(path, "wb")
pickle.dump(img, f)
f.close()
if args.path is None:
# import ipdb;ipdb.set_trace()
train_data = Celeb(train=True)
test_data = Celeb(train=False)
train_dataLoader = DataLoader(train_data, batch_size=1, shuffle=True)
test_dataLoader = DataLoader(test_data, batch_size=1, shuffle=True)
for x in test_dataLoader:
images = x
x0 = model(images)
out = (x0+1)/2
output = pil(out.squeeze(0))
print("max :", out.max().item(), "min :", out.min().item())
print('loss :', loss(x0, images))
print('ssim :', ssim(x0, images, data_range=1, size_average=True))
print("________________________\n")
orig = pil((x.squeeze(0)+1)/2)
        show = np.hstack((np.array(orig), np.array(output)))
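        # (hedged completion) display the side-by-side comparison; note that
        # cv2.imshow expects BGR channel order
        cv2.imshow('original vs reconstruction', cv2.cvtColor(show, cv2.COLOR_RGB2BGR))
        cv2.waitKey(0)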
import numpy as np
import cv2
from scipy.interpolate import interpn
from scipy.ndimage import zoom
import matplotlib as mpl
mpl.use("Agg")
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
def plot_heatmap_img(batch_segments,
batch_vis_views,
batch_scales,
batch_conv_vis,
batch_descriptor,
sess,
cnn_descriptor,
cnn_input,
cnn_input_vis,
cnn_scales
):
# MulRan
intensity = batch_vis_views[:, :, :, 0] * 173.09 + 209.30
# KITTI
# intensity = (batch_vis_views[:, :, :, 0] * 8297.86 + 19020.73) * 1500.0 / 65535.0
mask = batch_vis_views[:, :, :, 1] * 255.0
weights = np.zeros((batch_conv_vis.shape[0], batch_conv_vis.shape[3]), dtype=np.float)
for layer in range(batch_conv_vis.shape[3]):
l_heatmap = batch_conv_vis[:, :, :, layer]
cur_batch_vis_views = np.zeros(batch_vis_views.shape, dtype=batch_vis_views.dtype)
for b in range(l_heatmap.shape[0]):
cur_heatmap = cv2.resize(l_heatmap[b], (intensity.shape[2], intensity.shape[1]))
cur_heatmap = np.maximum(cur_heatmap, 0.0)
minh = np.min(cur_heatmap)
maxh = np.max(cur_heatmap)
cur_heatmap = (cur_heatmap - minh) / max(maxh - minh, 1e-4)
cur_batch_vis_views[b, :, :, :] = batch_vis_views[b, :, :, :] * np.expand_dims(cur_heatmap,
axis=-1)
[cur_batch_descriptor] = sess.run(
[cnn_descriptor],
feed_dict={
cnn_input: batch_segments,
cnn_input_vis: cur_batch_vis_views,
cnn_scales: batch_scales,
},
)
weights[:, layer] = np.linalg.norm(batch_descriptor - cur_batch_descriptor, axis=-1)
weights = 1.0 / np.maximum(weights, 1e-4)
scores = np.mean(weights, axis=-1)
weights = weights / np.sum(weights, axis=-1, keepdims=True)
heatmap = batch_conv_vis * weights[:, np.newaxis, np.newaxis, :]
heatmap = np.sum(heatmap, axis=-1)
img_heatmap_np = np.zeros(intensity.shape + (3,), dtype=np.uint8)
val_heatmap_np = np.zeros(intensity.shape, dtype=np.float)
for b in range(heatmap.shape[0]):
cur_heatmap = cv2.resize(heatmap[b], (intensity.shape[2], intensity.shape[1]))
cur_heatmap = np.maximum(cur_heatmap, 0.0)
cur_heatmap = cur_heatmap / np.max(cur_heatmap)
cur_heatmap_color = cv2.applyColorMap(np.uint8(255 * cur_heatmap), cv2.COLORMAP_JET)
cur_heatmap_color = cv2.cvtColor(cur_heatmap_color, cv2.COLOR_BGR2RGB)
cur_img_heatmap = 0.25 * cur_heatmap_color + \
0.75 * np.tile(np.expand_dims(np.minimum(intensity[b] * 255.0 / 1500.0, 255.0), axis=-1),
[1, 1, 3])
# cur_img_heatmap[mask[b] > 128.0, :] = np.array([255, 0, 0], dtype=np.uint8)
img_heatmap_np[b] = cur_img_heatmap
val_heatmap_np[b] = cur_heatmap
return img_heatmap_np, val_heatmap_np, scores
def resize_voxels(voxels, size):
# x = np.linspace(0, voxels.shape[0] - 1, num=voxels.shape[0])
# y = np.linspace(0, voxels.shape[1] - 1, num=voxels.shape[1])
# z = np.linspace(0, voxels.shape[2] - 1, num=voxels.shape[2])
voxels_resized = zoom(voxels, (size[0] / voxels.shape[0],
size[1] / voxels.shape[1],
size[2] / voxels.shape[2]))
return voxels_resized
def plot_heatmap_vox(batch_segments,
batch_vis_views,
batch_classes,
batch_scales,
batch_conv,
batch_descriptor,
sess,
cnn_descriptor,
cnn_input,
cnn_input_vis,
cnn_y_true,
cnn_scales,
height,
width
):
weights = np.zeros((batch_conv.shape[0], batch_conv.shape[-1]), dtype=np.float)
for layer in range(batch_conv.shape[-1]):
l_heatmap = batch_conv[:, :, :, :, layer]
cur_batch_segments = np.zeros(batch_segments.shape, dtype=batch_segments.dtype)
for b in range(l_heatmap.shape[0]):
cur_heatmap = resize_voxels(l_heatmap[b], (batch_segments.shape[1:-1]))
cur_heatmap = np.maximum(cur_heatmap, 0.0)
minh = np.min(cur_heatmap)
            maxh = np.max(cur_heatmap)
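            # (hedged reconstruction of the truncated tail; mirrors
            # plot_heatmap_img above, masking voxels instead of image pixels)
            cur_heatmap = (cur_heatmap - minh) / max(maxh - minh, 1e-4)
            cur_batch_segments[b] = batch_segments[b] * np.expand_dims(cur_heatmap, axis=-1)
        [cur_batch_descriptor] = sess.run(
            [cnn_descriptor],
            feed_dict={
                cnn_input: cur_batch_segments,
                cnn_input_vis: batch_vis_views,
                cnn_y_true: batch_classes,
                cnn_scales: batch_scales,
            },
        )
        weights[:, layer] = np.linalg.norm(batch_descriptor - cur_batch_descriptor, axis=-1)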
import numpy as np; from collections import Counter
crab_dist = Counter(np.fromfile("day7.txt", dtype=int, sep=","))
upper = max(crab_dist.keys())
cost = np.zeros((upper,), dtype=int)
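# Hedged completion (part 1: fuel cost is linear in distance):
positions = np.arange(upper)
for crab, n in crab_dist.items():
    cost += n * np.abs(positions - crab)
print(cost.min())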
#!/usr/bin/env python3
#
# (C) 2018-2020 <NAME>
import sys, argparse
import numpy as np
sys.path.append('../')
import PetscBinaryIO # may need link
def readcoordinates(io,vech):
objecttype = io.readObjectType(vech)
if objecttype == 'Vec':
xy = io.readVec(vech)
N = len(xy)
if N % 2 != 0:
print('ERROR: nodes in .vec file not of even length ... stopping')
sys.exit()
N = int(N/2)
        xy = np.reshape(xy,(N,2))
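        # (hedged completion) hand back the N-by-2 coordinate array
        return xy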
import qt
import numpy as np
import os
import shutil
import sys
import progressbar
from constants import *
def copy_script(once):
if once:
shutil.copy2('%s'%sys.argv[0],'%s/%s'%(data.get_filepath()[:-(len(data.get_filename())+1)],os.path.basename(sys.argv[0])))
fsv = qt.instruments.create('FSV', 'RhodeSchwartz_FSV', address = FSV_ADDRESS)
znb = qt.instruments.create('ZNB20', 'RhodeSchwartz_ZNB20', address=ZNB20_ADDRESS, reset= True)
smf = qt.instruments.create('SMF100', 'RhodeSchwartz_SMF100', address = SMF100_ADDRESS)
target_value = -79.1-1
# How many traces
no_of_traces = 200
## FSV parameters
center_frequency = 6025.*MHz
span = 200*Hz
RBW = 1*Hz
numpoints = 501
ref_level = -70 #dBm
#### SMF parameters
smf_freq = 6025*MHz - 6.583815*MHz
smf_power = -0.5
# VNA
probe_center = center_frequency
probe_span = 80*MHz
probe_numpoints = 201
if_bw_0 = 100*Hz
probe_power = 0
# two_probe_span = 2*Hz
two_probe_numpoints = 1
two_if_bw_1 = 2*Hz
two_probe_power = 0
# Prepare FSV
fsv.set_centerfrequency(center_frequency)
fsv.set_span(span)
fsv.set_bandwidth(RBW)
fsv.set_sweep_points(numpoints)
fsv.set_referencelevel(ref_level)
# Prepare SMF
smf.set_frequency(smf_freq)
smf.set_source_power(smf_power)
smf.rf_on()
# Setup VNA
znb.add_trace('S21')
znb.set_external_reference(True)
znb.set_source_power(probe_power)
znb.rf_on()
znb.set_sweep_mode('single')
def check_cavity():
znb.set_source_power(probe_power)
znb.set_center_frequency(probe_center)
znb.set_numpoints(probe_numpoints)
znb.set_span(probe_span)
znb.set_if_bandwidth(if_bw_0)
znb.send_trigger(wait=True)
znb.autoscale()
def cw_setup():
znb.rf_on()
znb.set_source_power(two_probe_power)
znb.set_numpoints(two_probe_numpoints)
znb.set_center_frequency(probe_center)
# znb.set_span(two_probe_span)
znb.set_if_bandwidth(two_if_bw_1)
znb.send_trigger(wait=True)
znb.autoscale()
dummy = znb.get_data('S21')
return 20*np.log10(np.abs(dummy[0]))
def thermal():
znb.set_source_power(-60)
znb.rf_off()
### SETTING UP DATA FILE
data_file_name = raw_input('Enter name of data file: ')
data=qt.Data(name=data_file_name)
data.add_coordinate('counter', units='nothing')
data.add_coordinate('Frequency', units='Hz')
data.add_value('PSD', units = 'dBm')
data.add_value('MP', units = 'dBm')
incr = np.arange(no_of_traces)
in_meta = [center_frequency - span/2, center_frequency + span/2, numpoints, 'Frequency (Hz)']
out_meta = [no_of_traces, 1.0, no_of_traces,'Counter']
once = True
value = 0
check_cavity()
while True:
print(value)
value_array = np.linspace(value, value, numpoints)
thermal()
fsv.run_single()
trace= fsv.get_data()
measure_peak = cw_setup()
print(measure_peak)
if measure_peak > target_value:
        mp = np.linspace(measure_peak, measure_peak, numpoints)
# -*- coding: utf-8 -*-
"""
Xarray stacked images writer.
Creates 3D datasets and supports spatial and temporal subsetting
(images and time series).
"""
#TODO. File locking as option for multiple processes?
# todo: Add Point data results manager (for ismn based results)
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
import warnings
import matplotlib.pyplot as plt
import os
from io_utils.utils import safe_arange
from pygeogrids.grids import BasicGrid, CellGrid, lonlat2cell
import copy
from io_utils.write.utils import minmax
def to_reg_cell_grid(grid, cellsize=5.):
"""
Create RegularCellGrid from BasicGrid or CellGrid
Parameters
----------
grid : CellGrid or BasicGrid
Input grid to convert
cellsize : float, optional (default: 5.)
Cell size of the CellGrid to create.
Returns
-------
grid : RegularCellGrid
A regularly gridded CellGrid
"""
if isinstance(grid, RegularCellGrid) and (grid.cellsize == cellsize):
return grid
return RegularCellGrid(grid.arrlon, grid.arrlat, cellsize, gpis=grid.gpis,
subset=grid.subset, shape=grid.shape)
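# Example usage (hypothetical 0.25-degree global grid):
#   lons, lats = np.meshgrid(safe_arange(-179.875, 180, 0.25),
#                            safe_arange(-89.875, 90, 0.25))
#   cgrid = to_reg_cell_grid(BasicGrid(lons.flatten(), lats.flatten()))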
class Point(object):
""" Helper class to combine lon and lat in one Point """
def __init__(self, lon, lat):
if lat > 90. or lat <-90:
raise IOError('{} is out of valid bounds (+-90) for Latitude'.format(lat))
if lon > 180. or lon <-180:
raise IOError('{} is out of valid bounds (+-180) for Longitude'.format(lon))
self.__lon, self.__lat = lon, lat
self.__loc = (lon, lat)
def __str__(self):
return 'Lon: {}, Lat: {}'.format(self.lon, self.lat)
@property
def lon(self):
return self.__lon
@property
def lat(self):
return self.__lat
@property
def loc(self):
return (self.lon, self.lat)
class RegularCellGrid(CellGrid):
# Special for of a Cell Grid that has equal spacing between grid points
def __init__(self, lon, lat, cellsize=5., gpis=None, geodatum='WGS84',
subset=None, setup_kdTree=False, **kwargs):
self.cellsize = cellsize
cells = lonlat2cell(lon, lat, cellsize=cellsize)
super(RegularCellGrid, self).__init__(lon, lat, cells, gpis, geodatum,
subset=subset, setup_kdTree=setup_kdTree,
**kwargs)
self.dx, self.dy = self._grid_space()
def _grid_space(self):
# find the resolution of the grid and check if it is regular along x and y
lons, lats = self.get_grid_points()[1], self.get_grid_points()[2]
diff_x = np.around(np.diff(sorted(np.unique(lons))), 10)
diff_y = np.around(np.diff(sorted(np.unique(lats))), 10)
dx = np.max(diff_x)
assert np.min(diff_x) == dx
dy = np.max(diff_y)
assert np.min(diff_y) == dy
assert np.all(diff_x == dx)
assert np.all(diff_y == dy)
return dx, dy
class RegularArea(object):
""" Helper class to combine lons and lats that span an Area """
def __init__(self, llc, urc, grid):
"""
Create an regularly gridded 2d Area.
Parameters
----------
llc : Point
Lower left corner point of the Area
urc : Point
Upper right corner point of the Area
grid : BasicGrid or CellGrid
An independent grid that the area is a subset of.
"""
self.grid = to_reg_cell_grid(grid)
self.llc = llc
self.urc = urc
self.subset = self._subset_from_corners()
def _subset(self, llc, urc):
ind = np.where((self.grid.activearrlon >= llc.lon) &
(self.grid.activearrlon <= urc.lon) &
(self.grid.activearrlat >= llc.lat) &
(self.grid.activearrlat <= urc.lat))
gpis = self.grid.activegpis[ind]
lons = self.grid.activearrlon[ind]
lats = self.grid.activearrlat[ind]
return gpis, lons, lats
def _subset_from_corners(self):
self._assert_corners()
gpis, lons, lats = self._subset(self.llc, self.urc)
subset = self.grid.subgrid_from_gpis(gpis)
subset.shape = (np.unique(lats).size, np.unique(lons).size)
return subset
def _assert_corners(self):
# check if the corner points are also in the grid
assert self.llc.lon in self.grid.get_grid_points()[1]
assert self.llc.lat in self.grid.get_grid_points()[2]
def as_slice(self, d=False):
"""
Create a lon and lat slice of the Area.
Parameters
---------
d : bool, optional (default: False)
Include step size in slice
Returns
-------
lon_slice : slice
Slice across the area
lat_slice : slice
Slice across the area
"""
return slice(self.llc.lon, self.urc.lon, self.grid.dx if d else None), \
slice(self.llc.lat, self.urc.lat, self.grid.dy if d else None)
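# Example usage (hypothetical corner points on a 0.25-degree grid):
#   area = RegularArea(Point(10.125, 45.125), Point(15.125, 50.125), grid)
#   lon_slice, lat_slice = area.as_slice()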
class NcRegGridStack(object):
""" Store netcdf cubes with xarray and dask """
def __init__(self, dx=0.25, dy=0.25, z=None, z_name='z',
llc=Point(-179.875, -89.875), urc=Point(179.875, 89.875),
indexed=True, zlib=True, fill_value=9999.):
"""
Parameters
----------
dx : float, optional (default: 0.25)
Regular spacing in x/lon direction
dy : float, optional (default: 0.25)
Regular spacing in y/lat direction
z : np.array
Z Values, e.g. Timestamps (z dimension of cube)
        z_name : str, optional (default: 'z')
Name of the z dimension (e.g. time or depth)
llc : Point, optional (default: Point(-179.875, -89.875))
Lower left corner point of the dataset area.
urc : Point, optional (default: Point(179.875, 89.875))
Upper right corner point of the dataset area.
indexed : bool, optional (default: True)
Add a 2d variable of unique index to each point of the dataset.
zlib : bool, optional (default: True)
Compress data when writing to netcdf
fill_value : float, optional (default: 9999.)
Fill value nans are replaced with
"""
        if z is None:
            z = np.array([None])
self.zlib = zlib
self.z_name = z_name
self.fill_value = fill_value
self.llc, self.urc = llc, urc
lons, lats = self._coords(dx, dy)
self.shape = (z.size, lats.size, lons.size)
gpis = self._gpis('ll') # origin is in the lower left
self.ds = xr.Dataset(
data_vars={'gpi': (['lat', 'lon'], gpis)} if indexed else None,
coords={'lon': lons, 'lat': lats, self.z_name: z})
self.grid = to_reg_cell_grid(self._grid(gpis), 5.)
@property
def subset(self):
return (self.llc, self.urc)
def _grid(self, gpis):
# create a pygeogrids object
lons, lats = np.meshgrid(self.ds.lon.values, np.flipud(self.ds.lat.values))
lons, lats = lons.flatten(), lats.flatten()
grid = BasicGrid(lons, lats, gpis=gpis.flatten()).to_cell_grid(5.)
return grid
def _gpis(self, origin='ll'):
"""
Parameters
---------
origin : str, optional (Default: 'll')
            String indicating where gpi=0 is.
ll = lower left, ur=upper right, lr = lower right, ul = upper left
Returns
---------
gpis : np.ndarray
Array of GPIs
"""
origins = ['ll', 'lr', 'ul', 'ur']
if origin not in origins:
raise NotImplementedError(
"Origin {} not implemented. Choose one of: {}"
.format(origin, ','.join(origins)))
n = self.shape[1] * self.shape[2]
gpis = np.arange(n).reshape(self.shape[1], self.shape[2])
if origin[0] == 'l':
gpis = np.flipud(gpis)
if origin[1] == 'r':
gpis = np.fliplr(gpis)
return gpis
def _coords(self, dx, dy):
""" Build coord range with chosen resolution over dataset area """
lons = safe_arange(self.llc.lon, self.urc.lon+dx, dx)
lats = safe_arange(self.llc.lat, self.urc.lat+dy, dy)
self.dx, self.dy = dx, dy
return lons, lats
def _add_empty_3d(self, name):
# add a empty variable with z dimension of the passed name
#print('Add empty 3D variable {}'.format(name))
self.ds[name] = \
xr.DataArray(np.full(self.shape, self.fill_value),
dims=[self.z_name, 'lat', 'lon'],
coords=[self.ds[self.z_name], self.ds.lat, self.ds.lon])
def _write_img(self, data, **kwargs):
"""
Write area to dataset.
Parameters
----------
data : xr.Dataset, 2d arrays to write, keys are variable names
"""
for var in list(data.data_vars.keys()):
if var not in self.ds.variables:
self._add_empty_3d(var)
self.ds[var].loc[dict(**kwargs)] = data[var]
def _write_ser(self, data, **kwargs):
"""
Write (time) series of multiple variables in data frame
"""
for var in data.keys():
if var not in self.ds.variables:
self._add_empty_3d(var)
assert data[var].size == self.ds[self.z_name].size
dat = data[var]
dat[np.isnan(dat)] = self.fill_value
self.ds[var].loc[dict(**kwargs)] = dat
def _write_pt(self, data, **kwargs):
# takes arrays of lon, lat, z and data dict of arrays
for var in data.keys():
if var not in self.ds.variables:
self._add_empty_3d(var)
dat = data[var]
dat[np.isnan(dat)] = self.fill_value
self.ds[var].loc[dict(**kwargs)] = dat
def store_stack(self, filename=None, global_attrs=None, dtypes=np.float32):
"""
Write down xarray cute to netcdf file
Parameters
----------
filename : str
Path to the stack file to write
global_attrs : dict, optional (default: None)
Global attributes
        dtypes : np.dtype, optional (default: np.float32)
            Data type used for stored variables; affects compression.
"""
if global_attrs is None:
global_attrs = {}
self.ds = self.ds.assign_attrs(global_attrs)
try:
if self.zlib:
encoding = {}
for var in self.ds.variables:
if var not in ['lat', 'lon', self.z_name]:
encoding[var] = {'complevel': 9, 'zlib': True,
'dtype': dtypes,
'_FillValue': self.fill_value}
else:
encoding = None
self.ds.to_netcdf(filename, engine='netcdf4', encoding=encoding)
        except: # todo: specify exception
            warnings.warn('Compression failed, storing uncompressed results.')
self.ds.to_netcdf(filename, engine='netcdf4')
self.ds.close()
def store_files(self, path, filename_templ='file_{}.nc',
dtypes=np.float32):
"""
filename_templ :
{} is replaced by the z indicator (strftime(z) if z is a date time).
"""
# todo: add option to append to existing file (memory dump)
# todo: integrate with the other function
if self.zlib:
encoding = {}
for var in self.ds.variables:
if var not in ['lat', 'lon', self.z_name]:
encoding[var] = {'complevel': 9, 'zlib': True,
'dtype': dtypes,
'_FillValue': self.fill_value}
else:
encoding = None
datetime_obs = [np.datetime64, datetime]
for z in self.ds[self.z_name]:
if any([isinstance(z.values, dt) for dt in datetime_obs]):
pydatetime=pd.to_datetime(z.values).to_pydatetime()
datestr = datetime.strftime(pydatetime, '%Y%m%d')
filename = filename_templ.format(datestr)
else:
filename = filename_templ.format(str(z.values))
try:
self.ds.loc[{self.z_name: z}].to_netcdf(os.path.join(path, filename),
engine='netcdf4', encoding=encoding)
            except: # todo: specify exception
                warnings.warn('Compression failed, storing uncompressed results.')
self.ds.loc[{self.z_name: z}].to_netcdf(os.path.join(path, filename),
engine='netcdf4')
self.ds.close()
def _subset_area(self, llc, urc):
"""
Read subset of current dataset:
Parameters
----------
llc : Point
Lower left corner point of area to read
urc : Point
Upper right corner point of area to read
Returns
---------
subset : xr.Dataset
Spatial subset of the current Dataset.
"""
lon_slice, lat_slice = RegularArea(llc, urc, self.grid).as_slice(True)
subset = self.ds.loc[dict(lon=lon_slice, lat=lat_slice)]
return subset
def _df2arr(self, df, llc:Point, urc:Point, lon_name:str, lat_name:str):
# get the lon and lat extent from df and create a 2d array#
local_df = df.copy(True)
area = RegularArea(llc, urc, self.grid)
local_df = local_df.set_index([lat_name, lon_name]).sort_index()
_, subset_lons, subset_lats, _ = area.subset.get_grid_points()
full_df = pd.DataFrame(data={'lon': subset_lons, 'lat': subset_lats})
full_df = full_df.set_index([lat_name, lon_name]).sort_index()
for k in local_df.columns:
full_df[k] = self.fill_value
full_df.loc[local_df.index] = local_df
arr = full_df.to_xarray()
slice_lon, slice_lat = area.as_slice(False)
return arr, slice_lon, slice_lat
def spatial_subset(self, llc, urc, in_place=False):
"""
Cut the current data set to a new subset
"""
if in_place:
self.ds = self._subset_area(llc, urc)
return self.ds
else:
return self._subset_area(llc, urc)
def write_image(self, df, z=None, lat_name='lat', lon_name='lon'):
"""
Add data for multiple locations at a specific point in z (e.g. time stamp)
Parameters
----------
z : int or float or str
Index in z-dimension (e.g. time stamp)
df : pd.DataFrame
DataFrame that contains image points.
lat_name : str, optional (default: 'lat')
Name of the latitude variable in the data frame.
lon_name : str, optional (default: 'lon')
Name of the longitude variable in the data frame.
"""
if z not in self.ds[self.z_name].values:
raise ValueError('{} was not found in the {} dimension.'
.format(z, self.z_name))
min_lon, max_lon = minmax(df[lon_name].values)
min_lat, max_lat = minmax(df[lat_name].values)
llc = Point(min_lon, min_lat)
urc = Point(max_lon, max_lat)
data, slice_lon, slice_lat = self._df2arr(df, llc, urc, lon_name, lat_name)
kwargs = {'lon' : slice_lon, 'lat' : slice_lat, self.z_name : z}
self._write_img(data, **kwargs)
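    # Example usage (hypothetical values): write one timestamp worth of points
    #   df = pd.DataFrame({'lon': [10.125, 10.375],
    #                      'lat': [45.125, 45.125],
    #                      'sm': [0.2, 0.3]})
    #   stack.write_image(df, z=np.datetime64('2020-01-01'))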
def write_series(self, lon, lat, df):
"""
Add data for multiple z values, for a single location. Series is in z-dimension.
Parameters
----------
lon : float
Longitude of point to write data for
lat : float
Latitude of point to write data for
df : pd.DataFrame
DataFrame with variables in columns and z-dimension values as index.
"""
index = df.index.to_numpy()
data = {k : df[k].values for k in df.columns}
if not np.all(np.equal(index, self.ds[self.z_name].values)):
            raise IndexError('Index of the passed data frame does not '
                             'correspond with the z values of the dataset')
self._write_ser(data, lon=lon, lat=lat)
def write_point(self, lon, lat, z, data):
"""
Add data for a single point and a single z value.
Parameters
---------
lon : np.array or list (1d)
Longitude of the point to write, same size as lat and z
lat : np.array or list (1d)
Latitude of the point to write, same size as lon and z
z : np.array or list (1d)
3rd dimension value of the point to write, same size as lon and lat
data : dict
Dictionary of variables and values to write.
Values must have same size as lon, lat and z.
"""
data = copy.deepcopy(data)
if not isinstance(lon, (np.ndarray, list)):
lon = np.array([lon])
if not isinstance(lat, (np.ndarray, list)):
lat = np.array([lat])
if not isinstance(z, (np.ndarray, list)):
            z = np.array([z])
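        # (hedged reconstruction of the truncated tail) delegate to the
        # point writer, indexing the z dimension by its configured name
        self._write_pt(data, **{'lon': lon, 'lat': lat, self.z_name: z})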
#!/usr/bin/env python
# coding: utf-8
# Solve differential flatness and check feasibility of control command
# Use NED coordinate
import os, sys, time, copy, yaml
import numpy as np
from .utils import *
# import cupy as cp
class QuadModel:
def __init__(self, cfg_path=None, drone_model=None):
if cfg_path == None:
curr_path = os.path.dirname(os.path.abspath(__file__))
cfg_path = curr_path+"/../config/multicopter_model.yaml"
if drone_model == None:
drone_model="default"
with open(cfg_path, 'r') as stream:
try:
cfg = yaml.safe_load(stream)
self.thrustCoef = np.double(cfg['motor_model']['thrust_coefficient'])
self.torqueCoef = np.double(cfg['motor_model']['torque_coefficient'])
self.armLength = np.double(cfg['motor_model']['moment_arm'])
self.mass = np.double(cfg['uav_model'][drone_model]['vehicle_mass'])
self.Ixx = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_xx'])
self.Iyy = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_yy'])
self.Izz = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_zz'])
self.w_max = np.double(cfg['motor_model']['max_prop_speed'])
self.w_min = np.double(cfg['motor_model']['min_prop_speed'])
self.gravity = np.double(cfg['simulation']['gravity'])
self.w_sta = np.sqrt(self.mass*self.gravity/self.thrustCoef/4.0)
except yaml.YAMLError as exc:
print(exc)
lt = self.armLength*self.thrustCoef
k0 = self.torqueCoef
k1 = self.thrustCoef
self.G1 = np.array([[lt,-lt,-lt,lt],\
[lt,lt,-lt,-lt],\
[-k0,k0,-k0,k0],\
[-k1,-k1,-k1,-k1]])
self.J = np.diag(np.array([self.Ixx,self.Iyy,self.Izz]))
return
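    # Sanity-check sketch (hypothetical usage): at hover every derivative is
    # zero, so getWs should return four rotor speeds equal to w_sta.
    #   model = QuadModel()
    #   Ws, state = model.getWs([0.0] * 18)
    #   assert np.allclose(Ws, model.w_sta)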
def getWs(self, status):
pos = np.array(status[0:3])
vel = np.array(status[3:6])
acc = np.array(status[6:9])
jer = np.array(status[9:12])
sna = np.array(status[12:15])
yaw = status[15]
dyaw = status[16]
ddyaw = status[17]
# Total thrust
tau_v = acc - np.array([0,0,self.gravity])
tau = -np.linalg.norm(tau_v)
bz = tau_v/tau
Thrust = self.mass*tau
# roll & pitch
roll = np.arcsin(np.dot(bz,[np.sin(yaw),-np.cos(yaw),0]))
pitch = np.arctan(np.dot(bz,[np.cos(yaw),np.sin(yaw),0])/bz[2])
bx = np.array([np.cos(yaw)*np.cos(pitch),np.sin(yaw)*np.cos(pitch),-np.sin(pitch)])
by = np.array([-np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll),\
np.cos(yaw)*np.cos(roll)+np.sin(yaw)*np.sin(pitch)*np.sin(roll),\
np.cos(pitch)*np.sin(roll)])
# dzhi & Omega
dzhi = np.dot(np.array([-1*by,bx/np.cos(roll),np.zeros(3)]),jer)/tau \
+np.array([np.sin(pitch),-np.cos(pitch)*np.tan(roll),1])*dyaw
S_inv = np.array([[1,0,-np.sin(pitch)],\
[0,np.cos(roll),np.cos(pitch)*np.sin(roll)],\
[0,-np.sin(roll),np.cos(pitch)*np.cos(roll)]])
Omega = np.dot(S_inv,dzhi)
C_inv = np.array([-1*by/tau,bx/np.cos(roll)/tau,bz])
d = np.array([np.cos(yaw)*np.sin(roll)-np.cos(roll)*np.sin(yaw)*np.sin(pitch),\
np.sin(yaw)*np.sin(roll)+np.cos(roll)*np.cos(yaw)*np.sin(pitch),0])*tau
dtau = np.dot(bz,jer-dyaw*d)
# ddzhi & dOmega
dS = np.array([[0,np.cos(roll)*np.tan(pitch),-np.sin(roll)*np.tan(pitch)],\
[0,-np.sin(roll),-np.cos(roll)],\
[0,np.cos(roll)/np.cos(pitch),-np.sin(roll)/np.cos(pitch)]])*dzhi[0]\
+np.array([[0,np.sin(roll)/np.cos(pitch)/np.cos(pitch),np.cos(roll)/np.cos(pitch)/np.cos(pitch)],\
[0,0,0],\
[0,np.sin(roll)*np.tan(pitch)/np.cos(pitch),np.cos(roll)*np.tan(pitch)/np.cos(pitch)]])*dzhi[1]
e = 2*dtau*np.dot(np.array([-1*by,bx,0]).T,Omega)\
+tau*np.dot(np.array([bx,by,bz]).T,np.array([Omega[0]*Omega[2],Omega[1]*Omega[2],-Omega[0]*Omega[0]-Omega[1]*Omega[1]]))\
-tau*np.dot(np.array([-1*by,bx,0]).T,np.dot(S_inv,np.dot(dS,Omega)))
ddzhi = np.dot(C_inv,sna-ddyaw*d-e)
ddzhi[2] = ddyaw
dOmega = -np.dot(S_inv,np.dot(dS,Omega))+np.dot(S_inv,ddzhi)
Mu = np.dot(self.J,dOmega) + np.cross(Omega,np.dot(self.J,Omega))
MT = np.zeros(4)
MT[:3] = Mu
MT[3] = Thrust
G1_inv = np.linalg.inv(self.G1)
Ws2 = np.dot(G1_inv,MT)
# Ws2 = np.clip(Ws2, np.power(self.w_min,2), np.power(self.w_max,2))
# Ws = np.sqrt(Ws2)
Ws = np.copysign(np.sqrt(np.abs(Ws2)),Ws2)
rpy = np.array([roll, pitch, yaw])
rpy_q = Euler2quat(np.array([roll, pitch, yaw]))
state = {
'roll':roll,
'pitch':pitch,
'rpy':rpy,
'rpy_q':rpy_q,
'dzhi':dzhi,
'ddzhi':ddzhi,
'ut':MT
}
return Ws, state
def getWs_vector(self, status):
pos = np.array(status[:,0:3])
vel = np.array(status[:,3:6])
acc = np.array(status[:,6:9])
jer = np.array(status[:,9:12])
sna = np.array(status[:,12:15])
yaw = np.array(status[:,15:16])
dyaw = np.array(status[:,16:17])
ddyaw = np.array(status[:,17:18])
# Total thrust
tau_v = acc - np.array([0,0,self.gravity])
tau = -np.linalg.norm(tau_v,axis=1)[:,np.newaxis]
bz = tau_v/tau
Thrust = self.mass*tau
# roll & pitch
roll = np.arcsin(np.einsum('ij,ij->i', bz,
np.concatenate((
np.sin(yaw),
-np.cos(yaw),
np.zeros_like(yaw)),axis=1)))[:,np.newaxis]
pitch = np.arctan(np.einsum('ij,ij->i', bz,
np.concatenate((
np.cos(yaw)/bz[:,2:3],
np.sin(yaw)/bz[:,2:3],
np.zeros_like(yaw)),axis=1)))[:,np.newaxis]
bx = np.concatenate((
np.cos(yaw)*np.cos(pitch),
np.sin(yaw)*np.cos(pitch),
-np.sin(pitch)),axis=1)
by = np.concatenate((
-np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll),
np.cos(yaw)*np.cos(roll)+np.sin(yaw)*np.sin(pitch)*np.sin(roll),
np.cos(pitch)*np.sin(roll)),axis=1)
# dzhi & Omega
dzhi = np.einsum('ijk,ij->ik',
np.concatenate((
-by[:,:,np.newaxis],
(bx/np.cos(roll))[:,:,np.newaxis],
np.zeros_like(by[:,:,np.newaxis])),axis=2),jer)/tau \
+np.concatenate((np.sin(pitch),-np.cos(pitch)*np.tan(roll),np.ones_like(pitch)),axis=1)*dyaw
S_inv = np.swapaxes(np.concatenate((
np.concatenate((np.ones_like(pitch),np.zeros_like(pitch),-np.sin(pitch)),axis=1)[:,:,np.newaxis],
np.concatenate((np.zeros_like(roll),np.cos(roll),np.cos(pitch)*np.sin(roll)),axis=1)[:,:,np.newaxis],
np.concatenate((np.zeros_like(roll),-np.sin(roll),np.cos(pitch)*np.cos(roll)),axis=1)[:,:,np.newaxis]),axis=2),1,2)
Omega = np.einsum('ijk,ik->ij',S_inv,dzhi)
C_inv = np.swapaxes(np.concatenate((
(-1*by/tau)[:,:,np.newaxis],
(bx/np.cos(roll)/tau)[:,:,np.newaxis],
bz[:,:,np.newaxis]),axis=2),1,2)
d = np.concatenate((
np.cos(yaw)*np.sin(roll)-np.cos(roll)*np.sin(yaw)*np.sin(pitch),
np.sin(yaw)*np.sin(roll)+np.cos(roll)*np.cos(yaw)*np.sin(pitch),
np.zeros_like(yaw)),axis=1)*tau
dtau = np.einsum('ij,ij->i',bz,jer-d*dyaw)[:,np.newaxis]
# ddzhi & dOmega
dS = np.swapaxes(np.concatenate((
np.concatenate((np.zeros_like(roll),np.cos(roll)*np.tan(pitch),-np.sin(roll)*np.tan(pitch)),axis=1)[:,:,np.newaxis],
np.concatenate((np.zeros_like(roll),-np.sin(roll),-np.cos(roll)),axis=1)[:,:,np.newaxis],
np.concatenate((np.zeros_like(roll),np.cos(roll)/np.cos(pitch),-np.sin(roll)/np.cos(pitch)),axis=1)[:,:,np.newaxis]
),axis=2),1,2)*(dzhi[:,0])[:,np.newaxis,np.newaxis] \
+np.swapaxes(np.concatenate((
np.concatenate((
np.zeros_like(roll),
np.sin(roll)/np.cos(pitch)/np.cos(pitch),
np.cos(roll)/np.cos(pitch)/np.cos(pitch)),axis=1)[:,:,np.newaxis],
np.concatenate((np.zeros_like(roll),np.zeros_like(roll),np.zeros_like(roll)),axis=1)[:,:,np.newaxis],
np.concatenate((
np.zeros_like(roll),
np.sin(roll)*np.tan(pitch)/np.cos(pitch),
np.cos(roll)*np.tan(pitch)/np.cos(pitch)),axis=1)[:,:,np.newaxis]
),axis=2),1,2)*(dzhi[:,1])[:,np.newaxis,np.newaxis]
e = 2*dtau*np.einsum('ijk,ik->ij',
np.concatenate((-by[:,:,np.newaxis],bx[:,:,np.newaxis],np.zeros_like(by[:,:,np.newaxis])),axis=2),Omega) \
+tau*np.einsum('ijk,ik->ij',
np.concatenate((bx[:,:,np.newaxis],by[:,:,np.newaxis],bz[:,:,np.newaxis]),axis=2),
np.concatenate(((Omega[:,0]*Omega[:,2])[:,np.newaxis],
(Omega[:,1]*Omega[:,2])[:,np.newaxis],
(-Omega[:,0]*Omega[:,0]-Omega[:,1]*Omega[:,1])[:,np.newaxis]),axis=1)) \
-tau*np.einsum('ijk,ik->ij',
np.concatenate((-by[:,:,np.newaxis],bx[:,:,np.newaxis],np.zeros_like(by[:,:,np.newaxis])),axis=2),
np.einsum('ijk,ik->ij',S_inv,np.einsum('ijk,ik->ij',dS,Omega)))
ddzhi = np.einsum('ijk,ik->ij',C_inv,sna-ddyaw*d-e)
ddzhi[:,2:] = ddyaw
dOmega = -np.einsum('ijk,ik->ij',S_inv,np.einsum('ijk,ik->ij',dS,Omega)) \
+np.einsum('ijk,ik->ij',S_inv,ddzhi)
I = np.einsum('ijk,ik->ij',np.repeat(self.J[np.newaxis,:,:],Omega.shape[0],0),Omega)
Mu = np.einsum('ijk,ik->ij',np.repeat(self.J[np.newaxis,:,:],dOmega.shape[0],0),dOmega) \
+np.concatenate((
(Omega[:,1]*I[:,2]-Omega[:,2]*I[:,1])[:,np.newaxis],
(Omega[:,2]*I[:,0]-Omega[:,0]*I[:,2])[:,np.newaxis],
(Omega[:,0]*I[:,1]-Omega[:,1]*I[:,0])[:,np.newaxis]),axis=1)
MT = np.zeros((Omega.shape[0],4))
MT[:,:3] = Mu
MT[:,3:] = Thrust
G1_inv = np.linalg.inv(self.G1)
Ws2 = np.einsum('ijk,ik->ij',np.repeat(G1_inv[np.newaxis,:,:],MT.shape[0],0),MT)
Ws = np.copysign(np.sqrt(np.abs(Ws2)),Ws2)
# (the per-sample state dict returned by getWs is omitted in this batched variant)
return Ws
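# --- Hedged usage sketch (not part of the original class above) ---
# Demonstrates the motor-speed recovery used in both getWs variants:
# squared speeds come from Ws2 = G1^{-1} . MT, and copysign(sqrt(|Ws2|), Ws2)
# keeps the sign of numerically negative squared speeds instead of
# producing NaN. G1 and MT below are made-up placeholder values.
if __name__ == '__main__':
    G1 = np.eye(4)                                # hypothetical mixer matrix
    MT = np.array([0.1, -0.2, 0.05, 9.81])        # hypothetical [Mu, Thrust]
    Ws2 = np.dot(np.linalg.inv(G1), MT)
    Ws = np.copysign(np.sqrt(np.abs(Ws2)), Ws2)   # -0.2 -> -sqrt(0.2)
    print(Ws)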
import os
from datetime import datetime
import cv2
import numpy as np
import undistort
from undistort import FisheyeUndistorter, PerspectiveUndistorter
from perftimer import PerfTimer
from clize import run
class VisionProcessor(object):
# input image must already have all distortions applied
def __init__(self, img, edge_mask = None):
# image expected to be a cv2 BRG image
self.img = img.copy()
self.original_img = img.copy()
self.height, self.width, self.channels = img.shape
self.colorspace = "bgr"
self.failed = False
self.masked_img = None
self.contours = []
self.sorted_contours = None
self.farthest_cy = self.height
self.farthest_ly = self.height
self.edge_mask = edge_mask
def convertToHsv(self):
    if self.colorspace == "bgr":
        self.hsv_image = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
    elif self.colorspace == "sat":
        # recompute from the restored original; otherwise hsv_image still
        # carries the crushed saturation channel from saturateHsv2Rgb
        self.restoreOrigImage()
        self.hsv_image = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
    self.img = self.hsv_image.copy()
    self.colorspace = "hsv"
def saturateHsv2Rgb(self):
if self.colorspace == "bgr":
self.convertToHsv()
self.hsv_image[:,:,1] = np.full((self.height, self.width), 255, dtype=np.uint8)
self.img = cv2.cvtColor(self.hsv_image.copy(), cv2.COLOR_HSV2BGR)
self.colorspace = "sat"
def restoreOrigImage(self):
self.img = self.original_img.copy()
self.colorspace = "bgr"
def crushChannel(self, chan, val = 0):
self.img[:,:,chan] = np.full((self.height, self.width), int(round(val)), dtype=np.uint8)
# try to create a mask around colourful objects, assuming the background is dark grey
# can work with all colours, but the hue range should be narrowed down if the colour is known
# this function can be called repeatedly and the masks will be OR'ed (stacked)
def maskRange(self, color_h_center = 30.0 / 2.0, color_h_range = 40.0, s_range = (0, 255), v_range = (64, 255)):
if self.colorspace == "edges" or self.colorspace == "sat":
self.restoreOrigImage()
if self.colorspace == "bgr":
self.convertToHsv()
if color_h_range > 90: # limit the range
color_h_range = 90
self.color_hue = color_h_center
h_max = int(round(color_h_center + color_h_range))
h_min = int(round(color_h_center - color_h_range))
s_rng = (int(round(s_range[0])), int(round(s_range[1])))
v_rng = (int(round(v_range[0])), int(round(v_range[1])))
hsv_min2 = None
hsv_max2 = None
if color_h_range >= 90: # detect any colourful object
hsv_min1 = np.array([0, s_rng[0], v_rng[0]])
hsv_max1 = np.array([180, s_rng[1], v_rng[1]])
elif h_max <= 180 and h_min >= 0: # normal case
hsv_min1 = np.array([h_min, s_rng[0], v_rng[0]])
hsv_max1 = np.array([h_max, s_rng[1], v_rng[1]])
elif h_max > 180: # center value just under 180, but coverage is above 180
hsv_min1 = np.array([h_min, s_rng[0], v_rng[0]])
hsv_max1 = np.array([180, s_rng[1], v_rng[1]])
hsv_max2 = np.array([h_max - 180, s_rng[1], v_rng[1]])
elif h_min < 0: # center value just above 0, but coverage is under 0
hsv_min1 = np.array([0, s_rng[0], v_rng[0]])
hsv_min2 = np.array([180 + h_min, s_rng[0], v_rng[0]])
hsv_max1 = np.array([h_max, s_rng[1], v_rng[1]])
masked_img1 = cv2.inRange(self.hsv_image, hsv_min1, hsv_max1)
masked_img2 = None
if hsv_min2 is not None: # hue wrapped below 0: also cover [180 + h_min, 180]
    masked_img2 = cv2.inRange(self.hsv_image, hsv_min2,
                              np.array([180, s_rng[1], v_rng[1]]))
if hsv_max2 is not None: # hue wrapped above 180: also cover [0, h_max - 180]
    masked_img2 = cv2.inRange(self.hsv_image,
                              np.array([0, s_rng[0], v_rng[0]]), hsv_max2)
if self.masked_img is None:
self.masked_img = masked_img1
cv2.bitwise_or(self.masked_img, masked_img1, self.masked_img)
if masked_img2 is not None:
cv2.bitwise_or(self.masked_img, masked_img2, self.masked_img)
self.img = self.masked_img.copy() # image is now a mask, a single channel, 0 for false, 255 for true
self.colorspace = "mask"
def cannyEdgeDetect(self, center_val = 127, val_spread = 110, morph_kernel_size = 10, blur = True, blur_kernel_size = 5):
if self.colorspace == "hsv":
self.img = cv2.cvtColor(self.img, cv2.COLOR_HSV2BGR)
if blur:
kernel = np.ones((blur_kernel_size, blur_kernel_size), np.float32) / (blur_kernel_size ** 2)
src_img = cv2.filter2D(self.img, -1, kernel)
else:
src_img = self.img
self.masked_img = cv2.Canny(src_img, center_val - val_spread, center_val + val_spread)
if self.edge_mask is not None:
self.masked_img = np.bitwise_and(self.masked_img, self.edge_mask)
kernel = np.ones((morph_kernel_size, morph_kernel_size), np.uint8)
self.masked_img = cv2.morphologyEx(self.masked_img, cv2.MORPH_CLOSE, kernel)
self.img = self.masked_img.copy()
self.colorspace = "edges"
def findContours(self, limit = 3):
contours, hierarchy = cv2.findContours(self.img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # finds all contours
# remove impossibly big and incredibly small contours
i = 0
while i < len(contours):
c = TapeContour(self, contours[i])
to_add = True
# large contour removal taken care of by the bitwise_and with edge_mask
# calculate the size of a speckle, remove it if it's too small
ratio = float(c.area) / float(self.width * self.height)
limit_ratio = (20.0 * 20.0) / (1333.0 * 720.0)
if ratio < limit_ratio:
to_add = False
if c.is_too_big:
to_add = False
if to_add:
self.contours.append(c)
i += 1
#print("found %u contours" % len(self.contours))
# remove overlaps
keep_removing = True
while keep_removing:
keep_removing = False
i = 0
while i < len(self.contours) - 1 and keep_removing == False:
j = i + 1
while j < len(self.contours) and keep_removing == False:
ci = self.contours[i]
cj = self.contours[j]
xsect, region = cv2.rotatedRectangleIntersection(ci.min_rect, cj.min_rect)
if cv2.INTERSECT_NONE != xsect:
if ci.area >= cj.area:
del self.contours[j]
else:
del self.contours[i]
keep_removing = True
break
j += 1
i += 1
self.sorted_contours = sorted(self.contours, key=self.calcRankedContourArea, reverse=True) # sort to find largest
if len(self.sorted_contours) > 0:
self.largest_contour = self.sorted_contours[0]
# remove contours until we are under the limit set
if limit > 0:
while len(self.sorted_contours) > limit:
del self.sorted_contours[-1]
self.contours = self.sorted_contours
def calcMeanAngle(self):
cnt = 0
area = 0.0
sum = 0.0
for c in self.contours:
if c.is_narrow:
ang = c.line_angle
area += c.area
sum += ang * c.area
cnt += 1
if cnt <= 0:
return 0.0, cnt
return (sum / float(area)), cnt
def calcBestFit(self, add_mid = False):
if self.contours is None or len(self.contours) <= 0:
self.failed = True
return
if len(self.contours) == 1:
    add_mid = True # TODO: handle the single-contour case more thoroughly
mean_angle, mean_angle_cnt = self.calcMeanAngle()
mid_base = (float(self.width) / 2.0, float(self.height))
points_x = []
points_y = []
if add_mid:
points_x.append(mid_base[0])
points_y.append(mid_base[1])
for c in self.contours:
points_x.append(c.cx)
points_y.append(c.cy)
cyf = float(c.cy) / float(self.height)
if cyf < self.farthest_cy:
self.farthest_cy = cyf
points_x = np.array(points_x)
points_y = np.array(points_y)
self.poly, resi, rank, _, _ = np.polyfit(points_x, points_y, 1, full=True)
self.poly2, resi2, rank2, _, _ = np.polyfit(points_y, points_x, 1, full=True) # do it again with x and y swapped, just in case the ordinary way has bad rank (or is vertical)
if rank > rank2 and rank != 1:
x0 = 0
y0 = self.poly[1]
vx = 1.0
vy = self.poly[0]
self.line_vx = vx
self.line_vy = vy
self.line_x0 = x0
self.line_y0 = y0
angle = np.arctan2(vx, -vy) # calc angle relative to vertical, positive is clockwise
angle = int(round(np.rad2deg(angle)))
fit_angle = get_forward_angle(angle)
else:
x02 = 0
y02 = self.poly2[1]
vx2 = 1.0
vy2 = self.poly2[0]
# flip x and y
vx = vy2
vy = vx2
x0 = y02
y0 = x02
self.line_vx = vx
self.line_vy = vy
self.line_x0 = x0
self.line_y0 = y0
angle = np.arctan2(vx2, vy2)
angle = int(round(np.rad2deg(angle))) - 90
fit_angle = get_forward_angle(angle)
if fit_angle < 2 and fit_angle > -2: # vertical-ish line
vx = 0
self.line_vx = 0
#print("fit_angle %.1f [%u] mean_angle %.1f [%u] p(%.2f , %.2f)" % (fit_angle, len(points_x), mean_angle, mean_angle_cnt, vx, vy))
# check if the poly fit did a good job
good_fit = True
if len(self.contours) >= 3:
if vx != 0:
# check each point against distance to the line
m, b = self.get_line_equation()
i = 0
while i < len(self.contours) and good_fit:
ci = self.contours[i]
err = abs(b + (m * ci.cx) - ci.cy) / np.sqrt(1 + (m * m))
if err > self.height:
good_fit = False
break
i += 1
else: # vx is 0 so vertical line
i = 0
while i < len(self.contours) and good_fit:
ci = self.contours[i]
err = abs(ci.cx - x0)
if err > self.height:
good_fit = False
break
i += 1
# check distance between points
i = 0
while i < len(points_x) - 1 and good_fit:
j = i + 1
while j < len(points_x) and good_fit:
dx = abs(points_x[i] - points_x[j])
dy = abs(points_y[i] - points_y[j])
dist = np.sqrt((dx * dx) + (dy * dy))
if dist > self.height:
good_fit = False
break
j += 1
i += 1
use_fit_angle = False
if mean_angle_cnt <= 0:
use_fit_angle = True
elif mean_angle_cnt <= 1 and good_fit == True and len(self.contours) >= 3:
use_fit_angle = True
elif mean_angle_cnt >= 2 and add_mid == False and len(self.contours) <= 2:
use_fit_angle = False
else:
if abs(mean_angle - fit_angle) < 45 or good_fit == True:
use_fit_angle = True
if add_mid and len(points_x) == 2 and self.sorted_contours[0].is_narrow:
self.line_angle = (fit_angle + mean_angle) / 2.0
x0 = float(self.width) / 2.0
self.line_x0 = x0
y0 = self.height
self.line_y0 = y0
vx = np.sin(np.deg2rad(self.line_angle))
vy = -np.cos(np.deg2rad(self.line_angle))
elif use_fit_angle:
self.line_angle = fit_angle
else:
self.line_angle = mean_angle
i = 0
while i < len(self.sorted_contours):
c = self.sorted_contours[i]
if c.is_narrow:
x0 = c.cx
y0 = c.cy
vx = np.sin(np.deg2rad(mean_angle))
vy = -np.cos(np.deg2rad(mean_angle))
j = i + 1
while j < len(self.sorted_contours):
c = self.sorted_contours[j]
if c.is_narrow:
x02 = c.cx
y02 = c.cy
x0 = (x0 + x02) / 2
y0 = (y0 + y02) / 2
break
j += 1
if vx == 0:
self.line_vx = 0
self.line_x0 = x0
break
i += 1
if vx > 0 or vx < 0:
self.line_lefty = int(round(((-x0) * vy / vx) + y0))
self.line_righty = int(round(((self.width - x0) * vy / vx) + y0))
min_ly = min(self.line_lefty, self.line_righty)
self.farthest_ly = float(abs(self.height - min_ly)) / float(self.height)
else:
self.line_lefty = 0
self.line_righty = 0
if self.line_vx != 0:
m, b = self.get_line_equation()
x_bottom = get_line_x_for_y(self.height, m, b)
else:
x_bottom = self.line_x0
mid = float(self.width) / 2.0
dx = x_bottom - mid
px = dx / mid
self.x_bottom = px
def visualize(self, line_thickness = 5, hue = 0):
hsv_img = self.hsv_image.copy()
if hue < 0:
hue = int(self.color_hue + 90 + 180) % 180 # we only care about the hue, get its opposite
hue = int(round(hue))
if self.contours is None or len(self.contours) <= 0:
return cv2.cvtColor(hsv_img.copy(), cv2.COLOR_HSV2BGR)
for c in self.contours:
c.visualize(hsv_img, (hue, 255, 255), line_thickness)
try:
if self.line_vx > 0 or self.line_vx < 0:
cv2.line(hsv_img, (self.width - 1, self.line_righty), (0, self.line_lefty), (hue, 255, 255), line_thickness)
else:
cv2.line(hsv_img, (int(round(self.line_x0)), 0), (int(round(self.line_x0)), self.height - 1), (hue, 255, 255), line_thickness)
except:
pass
return cv2.cvtColor(hsv_img.copy(), cv2.COLOR_HSV2BGR)
def get_angle(self):
if self.failed:
return 0
return self.line_angle
def get_line_equation(self):
if self.failed:
return 0, 0
if self.line_vx == 0:
return float("inf"), float("inf")
m = float(self.line_vy) / float(self.line_vx)
b = float(self.line_y0) - (m * float(self.line_x0))
return m, b
# this is used for sorting purposes, the contour closer to the robot is ranked as larger
def calcRankedContourArea(self, contour):
return contour.get_rankedArea()
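# --- Hedged sketch (not part of the original module) ---
# Illustrates the hue wrap-around handled by VisionProcessor.maskRange:
# OpenCV hue runs 0..180, so a range centred near either end needs two
# inRange() calls OR'ed together. The centre/range values are made up.
def _hue_wrap_demo(bgr_img):
    hsv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
    # centre 175 +/- 10 -> [165, 180] plus the wrapped [0, 5]
    m1 = cv2.inRange(hsv, np.array([165, 0, 64]), np.array([180, 255, 255]))
    m2 = cv2.inRange(hsv, np.array([0, 0, 64]), np.array([5, 255, 255]))
    return cv2.bitwise_or(m1, m2)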
class TapeContour(object):
def __init__(self, parent, contour):
self.parent = parent
self.original_contour = contour
self.min_rect = cv2.minAreaRect(contour)
self.box_points = cv2.boxPoints(self.min_rect)
(x, y), (width, height), rect_angle = self.min_rect
self.cx = x
self.cy = y
self.width = width
self.height = height
self.area = width * height
max_dim = max(width, height)
min_dim = min(width, height)
#diff_dim = max_dim - min_dim
if min_dim * 1.5 < max_dim:
self.is_narrow = True
else:
self.is_narrow = False
if float(min_dim) / parent.width >= (150.0 / 1333.0):
self.is_too_big = True
else:
self.is_too_big = False
if width > height:
line_angle = rect_angle + 90
else:
line_angle = rect_angle
self.line_angle = get_forward_angle(line_angle)
def get_rankedArea(self):
factor = 0.5
scale = (1.0 - factor) + (factor * (self.cy / float(self.parent.height)))
return float(self.area) * scale
def visualize(self, img, colour=(255, 0, 0), thickness = 5):
box = np.int0(self.box_points)
cv2.drawContours(img, [box], 0, colour, thickness)
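# --- Hedged sketch (not part of the original module) ---
# The narrowness test TapeContour applies, shown on a synthetic rectangle:
# a contour is "narrow" when the long side of its minimum-area rectangle
# exceeds 1.5x the short side.
def _narrowness_demo():
    pts = np.array([[0, 0], [100, 0], [100, 20], [0, 20]], dtype=np.float32)
    (cx, cy), (w, h), ang = cv2.minAreaRect(pts)
    return min(w, h) * 1.5 < max(w, h)  # True for a 100x20 rectangle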
class VisionPilot(object):
def __init__(self, edge_mask = None, ang_steer_coeff = 2.2, offset_steer_coeff = 64, dist_throttle_coeff = 0.5, steer_max = 128, throttle_max = 128, savedir=""):
self.perftimer = PerfTimer()
self.edge_mask = edge_mask
self.ang_steer_coeff = float(ang_steer_coeff)
self.offset_steer_coeff = float(offset_steer_coeff)
self.dist_throttle_coeff = float(dist_throttle_coeff)
self.steer_max = float(steer_max)
self.throttle_max = float(throttle_max)
self.last_steering = 0
self.save_dir = savedir
self.save_cnt = 0
if self.save_dir is not None:
if len(self.save_dir) > 0:
try:
os.makedirs(self.save_dir)
except FileExistsError:
pass
# returns values good for driving directly
def process(self, img_arr, fname=None):
self.proc = VisionProcessor(img_arr, edge_mask = self.edge_mask)
self.proc.convertToHsv()
self.proc.saturateHsv2Rgb()
self.proc.crushChannel(0) # removes all blue, for Circuit Launch's carpet
self.proc.cannyEdgeDetect()
self.proc.maskRange() # finds normal
self.proc.findContours()
if len(self.proc.contours) <= 0:
self.proc.maskRange(color_h_range = 90, s_range = (0.0, 255.0 * 0.2), v_range = (255.0 * 0.90, 255.0)) # find white
self.proc.findContours()
self.proc.calcBestFit()
if self.proc.failed:
if self.last_steering >= 0:
return 64, 127
else:
return -64, 127
angle = self.proc.get_angle()
throttle = float(self.throttle_max) * (self.proc.farthest_ly / self.dist_throttle_coeff)
if throttle > self.throttle_max:
throttle = self.throttle_max
ang_component = angle * self.ang_steer_coeff
offset_component = self.proc.x_bottom * self.offset_steer_coeff
steering = ang_component + offset_component
#print("angle %f , ang_co %f , offset_co %f" % (angle, ang_component, offset_component))
if steering > self.steer_max:
steering = self.steer_max
elif steering < -self.steer_max:
steering = -self.steer_max
self.steering = steering
self.throttle = throttle
self.last_steering = steering
self.save_training(img_arr, fname = fname)
return float(steering), float(throttle)
# returns values good for neural networks
def run(self, img_arr):
steering, throttle = self.process(img_arr)
steering /= 127.0
throttle /= 127.0
self.perftimer.tick()
return np.clip(steering, -1.0, 1.0), np.clip(throttle, -1.0, 1.0)
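# --- Hedged usage sketch (not part of the original module) ---
# Driving the pilot from a single image on disk; the file name is a
# placeholder, no fisheye/perspective undistortion or edge mask is applied,
# and save_training (referenced in process() but elided above) is assumed
# to exist.
if __name__ == '__main__':
    frame = cv2.imread('test_frame.jpg')
    if frame is not None:
        pilot = VisionPilot(savedir="")
        steer, throttle = pilot.run(frame)
        print("steer=%.3f throttle=%.3f" % (steer, throttle))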
from sklearn.neighbors import KernelDensity
from scipy.signal import argrelextrema
import numpy as np
import math
from itertools import compress
def sort_noise(timecourses=None,
lag1=None,
return_logpdf=False,
method='KDE',
verbose=False):
'''
Sorts timecourses into two clusters (signal and noise) based on
lag-1 autocorrelation.
Timecourses should be a np array of shape (n, t).
Returns noise_components, a np array with a 1 for each timecourse
classified as noise, as well as the detected cutoff value
'''
if method == 'KDE':
# calculate lag autocorrelations
if lag1 is None:
assert timecourses is not None, 'sort_noise requires either timecourses or lag1'
lag1 = lag_n_autocorr(timecourses, 1)
# locate the density minimum between the two KDE peaks
kde_skl = KernelDensity(kernel='gaussian',
bandwidth=0.05).fit(lag1[:, np.newaxis])
x_grid = np.linspace(-0.2, 1.2, 1200)
log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
maxima = argrelextrema(np.exp(log_pdf), np.greater)[0]  # local maxima of the KDE; np.greater is an assumed comparator, the original line is truncated
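# --- Hedged sketch (not part of the original module) ---
# The cutoff idea above on synthetic data: two clusters of lag-1
# autocorrelations, a Gaussian KDE over them, and the density minimum
# between the two modes taken as the signal/noise cutoff. All values
# below are made up.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    lag1_demo = np.concatenate([rng.normal(0.1, 0.05, 200),
                                rng.normal(0.8, 0.05, 200)])
    kde = KernelDensity(kernel='gaussian', bandwidth=0.05).fit(lag1_demo[:, np.newaxis])
    grid = np.linspace(-0.2, 1.2, 1200)
    pdf = np.exp(kde.score_samples(grid[:, np.newaxis]))
    minima = argrelextrema(pdf, np.less)[0]
    if len(minima) > 0:
        cutoff = grid[minima[np.argmin(pdf[minima])]]
        print("signal/noise cutoff:", cutoff)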
"""
oracle.py
Variable Markov Oracle in python
@copyright:
Copyright (C) 9.2014 <NAME>
This file is part of vmo.
@license:
vmo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
vmo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with vmo. If not, see <http://www.gnu.org/licenses/>.
@author: <NAME>
@contact: <EMAIL>, <EMAIL>
"""
import numpy as np
import scipy.spatial.distance as dist
import vmo.VMO.utility.misc as utl
class data(object):
"""A helper class to encapsulate objects for symbolic comparison
By default, the first entry of the list or tuple is used as the feature to
test for equality between different data object.
Attributes:
content: a list or tuple
idx: the index of the list or tuple to be tested for equality
"""
def __init__(self, data_item, index=0):
self.content = data_item
self.idx = index
def __repr__(self):
return str(self.content)
def __eq__(self, other):
if type(other) == data:
if self.content[self.idx] == other.content[self.idx]:
return True
else:
return False
else:
return False
def __ne__(self, other):
return not (self == other)
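# --- Illustration (not part of the original module) ---
# data objects compare on a single entry of their content (index 0 by
# default), so items sharing a symbol but carrying different payloads
# still test equal:
#     data(('a', 1.0)) == data(('a', 2.5))   # True
#     data(('a', 1.0)) == data(('b', 1.0))   # False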
class FactorOracle(object):
""" The base class for the FO(factor oracle) and MO(variable markov oracle)
Attributes:
sfx: a list containing the suffix link of each state.
trn: a list containing the forward links of each state as a list.
rsfx: a list containing the reverse suffix links of each state
as a list.
lrs: the value of longest repeated suffix of each state.
data: the symbols associated with the direct link
connected to each state.
compror: a list of tuples (i, i-j), i is the current coded position,
i-j is the length of the corresponding coded words.
code: a list of tuples (len, pos), len is the length of the
corresponding coded words, pos is the position where the coded
words starts.
seg: same as code but non-overlapping.
f_array: (For kind 'a' and 'v'): a list containing the feature array
latent: (For kind 'a' and 'v'): a list of lists with each sub-list
containing the indexes for each symbol.
kind:
'a': Variable Markov oracle
'f': repeat oracle
'v': Centroid-based oracle (under test)
n_states: number of total states, also is length of the input
sequence plus 1.
max_lrs: the longest lrs so far.
avg_lrs: the average lrs so far.
name: the name of the oracle.
params: a python dictionary for different feature and distance settings.
keys:
'threshold': the minimum distance for treating two feature
values as different symbols.
'weights': a dictionary containing different weights for features
used.
'dfunc': the distance function.
"""
def __init__(self, **kwargs):
# Basic attributes
self.sfx = []
self.trn = []
self.rsfx = []
self.lrs = []
self.data = []
# Compression attributes
self.compror = []
self.code = []
self.seg = []
# Object attributes
self.kind = 'f'
self.name = ''
# Oracle statistics
self.n_states = 1
self.max_lrs = []
self.max_lrs.append(0)
self.avg_lrs = []
self.avg_lrs.append(0.0)
# Oracle parameters
self.params = {
'threshold': 0,
'dfunc': 'cosine',
'dfunc_handle': None,
'dim': 1
}
self.update_params(**kwargs)
# Adding zero state
self.sfx.append(None)
self.rsfx.append([])
self.trn.append([])
self.lrs.append(0)
self.data.append(0)
def reset(self, **kwargs):
self.update_params(**kwargs)
# Basic attributes
self.sfx = []
self.trn = []
self.rsfx = []
self.lrs = []
self.data = []
# Compression attributes
self.compror = []
self.code = []
self.seg = []
# Object attributes
self.kind = 'f'
self.name = ''
# Oracle statistics
self.n_states = 1
self.max_lrs = []
self.max_lrs.append(0)
self.avg_lrs = []
self.avg_lrs.append(0.0)
# Adding zero state
self.sfx.append(None)
self.rsfx.append([])
self.trn.append([])
self.lrs.append(0)
self.data.append(0)
def update_params(self, **kwargs):
"""Subclass this"""
self.params.update(kwargs)
def add_state(self, new_data):
"""Subclass this"""
pass
def _encode(self):
_code = []
_compror = []
if not self.compror:
j = 0
else:
j = self.compror[-1][0]
i = j
while j < self.n_states - 1:
while i < self.n_states - 1 and self.lrs[i + 1] >= i - j + 1:
i += 1
if i == j:
i += 1
_code.append([0, i])
_compror.append([i, 0])
else:
_code.append([i - j, self.sfx[i] - i + j + 1])
_compror.append([i, i - j])
j = i
return _code, _compror
def encode(self):
_c, _cmpr = self._encode()
self.code.extend(_c)
self.compror.extend(_cmpr)
return self.code, self.compror
def segment(self):
"""An non-overlap version Compror"""
if not self.seg:
j = 0
else:
j = self.seg[-1][1]
last_len = self.seg[-1][0]
if last_len + j > self.n_states:
return
i = j
while j < self.n_states - 1:
while i < self.n_states - 1 and self.lrs[i + 1] >= i - j + 1:
i += 1
if i == j:
i += 1
self.seg.append((0, i))
else:
if (self.sfx[i] + self.lrs[i]) <= i:
self.seg.append((i - j, self.sfx[i] - i + j + 1))
else:
_i = j + i - self.sfx[i]
self.seg.append((_i - j, self.sfx[i] - i + j + 1))
_j = _i
while _i < i and self.lrs[_i + 1] - self.lrs[_j] >= _i - _j + 1:
_i += 1
if _i == _j:
_i += 1
self.seg.append((0, _i))
else:
self.seg.append((_i - _j, self.sfx[_i] - _i + _j + 1))
j = i
return self.seg
def _ir(self, alpha=1.0):
code, _ = self.encode()
cw = np.zeros(len(code)) # Number of code words
for i, c in enumerate(code):
cw[i] = c[0] + 1
c0 = [1 if x[0] == 0 else 0 for x in self.code]
h0 = np.log2(np.cumsum(c0))
h1 = np.zeros(len(cw))
for i in range(1, len(cw)):
h1[i] = utl.entropy(cw[0:i + 1])
ir = alpha * h0 - h1
return ir, h0, h1
def _ir_fixed(self, alpha=1.0):
code, _ = self.encode()
h0 = np.log2(self.num_clusters())
if self.max_lrs[-1] == 0:
h1 = np.log2(self.n_states - 1)
else:
h1 = np.log2(self.n_states - 1) + np.log2(self.max_lrs[-1])
BL = np.zeros(self.n_states - 1)
j = 0
for i in range(len(code)):
if self.code[i][0] == 0:
BL[j] = 1
j += 1
else:
L = code[i][0]
BL[j:j + L] = L # range(1,L+1)
j = j + L
ir = alpha * h0 - h1 / BL
ir[ir < 0] = 0
return ir, h0, h1
def _ir_cum(self, alpha=1.0):
code, _ = self.encode()
N = self.n_states
cw0 = np.zeros(N - 1) # cw0 counts the appearance of new states only
cw1 = np.zeros(N - 1) # cw1 counts the appearance of all compror states
BL = np.zeros(N - 1) # BL is the block length of compror codewords
j = 0
for i in range(len(code)):
if self.code[i][0] == 0:
cw0[j] = 1
cw1[j] = 1
BL[j] = 1
j += 1
else:
L = code[i][0]
cw1[j] = 1
BL[j:j + L] = L # range(1,L+1)
j = j + L
h0 = np.log2(np.cumsum(cw0))
h1 = np.log2(np.cumsum(cw1))
h1 = h1 / BL
ir = alpha * h0 - h1
ir[ir < 0] = 0
return ir, h0, h1
def _ir_cum2(self, alpha=1.0):
code, _ = self.encode()
N = self.n_states
BL = np.zeros(N - 1) # BL is the block length of compror codewords
h0 = np.log2(np.cumsum(
[1.0 if sfx == 0 else 0.0 for sfx in self.sfx[1:]])
)
"""
h1 = np.array([h if m == 0 else h+np.log2(m)
for h,m in zip(h0,self.lrs[1:])])
h1 = np.array([h if m == 0 else h+np.log2(m)
for h,m in zip(h0,self.max_lrs[1:])])
h1 = np.array([h if m == 0 else h+np.log2(m)
for h,m in zip(h0,self.avg_lrs[1:])])
"""
h1 = np.array([np.log2(i + 1) if m == 0 else np.log2(i + 1) + np.log2(m)
for i, m in enumerate(self.max_lrs[1:])])
j = 0
for i in range(len(code)):
if self.code[i][0] == 0:
BL[j] = 1
j += 1
else:
L = code[i][0]
BL[j:j + L] = L # range(1,L+1)
j = j + L
h1 = h1 / BL
ir = alpha * h0 - h1
ir[ir < 0] = 0 # Really a HACK here!!!!!
return ir, h0, h1
def _ir_cum3(self, alpha=1.0):
h0 = np.log2(np.cumsum(
[1.0 if sfx == 0 else 0.0 for sfx in self.sfx[1:]])
)
h1 = np.array([h if m == 0 else (h + np.log2(m)) / m
for h, m in zip(h0, self.lrs[1:])])
ir = alpha * h0 - h1
ir[ir < 0] = 0 # Really a HACK here!!!!!
return ir, h0, h1
def IR(self, alpha=1.0, ir_type='cum'):
if ir_type == 'cum':
return self._ir_cum(alpha)
elif ir_type == 'all':
return self._ir(alpha)
elif ir_type == 'fixed':
return self._ir_fixed(alpha)
elif ir_type == 'cum2':
return self._ir_cum2(alpha)
elif ir_type == 'cum3':
return self._ir_cum3(alpha)
def num_clusters(self):
return len(self.rsfx[0])
def threshold(self):
if self.params.get('threshold'):
return int(self.params.get('threshold'))
else:
raise ValueError("Threshold is not set!")
def dfunc(self):
if self.params.get('dfunc'):
return self.params.get('dfunc')
else:
raise ValueError("dfunc is not set!")
def dfunc_handle(self, a, b_vec):
fun = self.params['dfunc_handle']
return fun(a, b_vec)
def _len_common_suffix(self, p1, p2):
if p2 == self.sfx[p1]:
return self.lrs[p1]
else:
while self.sfx[p2] != self.sfx[p1] and p2 != 0:
p2 = self.sfx[p2]
return min(self.lrs[p1], self.lrs[p2])
def _find_better(self, i, symbol):
self.rsfx[self.sfx[i]].sort()
for j in self.rsfx[self.sfx[i]]:
if (self.lrs[j] == self.lrs[i] and
self.data[j - self.lrs[i]] == symbol):
return j
return None
class FO(FactorOracle):
""" An implementation of the factor oracle
"""
def __init__(self, **kwargs):
super(FO, self).__init__(**kwargs)
self.kind = 'r'
def add_state(self, new_symbol):
"""
:type self: oracle
"""
self.sfx.append(0)
self.rsfx.append([])
self.trn.append([])
self.lrs.append(0)
self.data.append(new_symbol)
self.n_states += 1
i = self.n_states - 1
self.trn[i - 1].append(i)
k = self.sfx[i - 1]
pi_1 = i - 1
# Adding forward links
while k is not None:
_symbols = [self.data[state] for state in self.trn[k]]
if self.data[i] not in _symbols:
self.trn[k].append(i)
pi_1 = k
k = self.sfx[k]
else:
break
if k is None:
self.sfx[i] = 0
self.lrs[i] = 0
else:
_query = [[self.data[state], state] for state in
self.trn[k] if self.data[state] == self.data[i]]
_query = sorted(_query, key=lambda _query: _query[1])
_state = _query[0][1]
self.sfx[i] = _state
self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
k = self._find_better(i, self.data[i - self.lrs[i]])
if k is not None:
self.lrs[i] += 1
self.sfx[i] = k
self.rsfx[self.sfx[i]].append(i)
if self.lrs[i] > self.max_lrs[i - 1]:
self.max_lrs.append(self.lrs[i])
else:
self.max_lrs.append(self.max_lrs[i - 1])
self.avg_lrs.append(self.avg_lrs[i - 1] * ((i - 1.0) / (self.n_states - 1.0)) +
self.lrs[i] * (1.0 / (self.n_states - 1.0)))
def accept(self, context):
""" Check if the context could be accepted by the oracle
Args:
context: s sequence same type as the oracle data
Returns:
bAccepted: whether the sequence is accepted or not
_next: the state where the sequence is accepted
"""
_next = 0
for _s in context:
_data = [self.data[j] for j in self.trn[_next]]
if _s in _data:
_next = self.trn[_next][_data.index(_s)]
else:
return 0, _next
return 1, _next
def get_alphabet(self):
alphabet = [self.data[i] for i in self.trn[0]]
dictionary = dict(zip(alphabet, range(len(alphabet))))
return dictionary
@property
def latent(self):
latent = []
for s in self.trn[0]:
indices = set([s])
indices = utl.get_rsfx(self, indices, s)
latent.append(list(indices))
return latent
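# --- Hedged usage sketch (not part of the original module) ---
# Building a factor oracle over a short symbol sequence and querying it;
# the sequence is arbitrary test data.
if __name__ == '__main__':
    fo = FO()
    for symbol in 'abaab':
        fo.add_state(symbol)
    print(fo.accept('ab'))   # (1, <state>): 'ab' is a factor of 'abaab'
    print(fo.encode())       # Compror code words for the sequence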
class MO(FactorOracle):
def __init__(self, **kwargs):
super(MO, self).__init__(**kwargs)
self.kind = 'a'
self.f_array = feature_array(self.params['dim'])
self.f_array.add(np.zeros(self.params['dim'], ))
self.data[0] = None
self.latent = []
def reset(self, **kwargs):
super(MO, self).reset(**kwargs)
self.kind = 'a'
# self.f_array = [0]
self.f_array = feature_array(self.params['dim'])
self.f_array.add(np.zeros(self.params['dim'], ))
self.data[0] = None
self.latent = []
def add_state(self, new_data, method='inc'):
"""Create new state and update related links and compressed state"""
self.sfx.append(0)
self.rsfx.append([])
self.trn.append([])
self.lrs.append(0)
# Experiment with pointer-based
self.f_array.add(new_data)
self.n_states += 1
i = self.n_states - 1
# assign new transition from state i-1 to i
self.trn[i - 1].append(i)
k = self.sfx[i - 1]
pi_1 = i - 1
# iteratively backtrack suffixes from state i-1
if method == 'inc':
suffix_candidate = 0
elif method == 'complete':
suffix_candidate = []
else:
suffix_candidate = 0
while k is not None:
if self.params['dfunc'] == 'other':
# dvec = self.dfunc_handle([new_data],
# self.f_array[self.trn[k]])[0]
dvec = dist.cdist([new_data],
self.f_array[self.trn[k]],
metric=self.params['dfunc_handle'])[0]
else:
dvec = dist.cdist([new_data],
self.f_array[self.trn[k]],
metric=self.params['dfunc'])[0]
I = np.where(dvec < self.params['threshold'])[0]
if len(I) == 0: # if no transition from suffix
self.trn[k].append(i) # Add new forward link to unvisited state
pi_1 = k
if method != 'complete':
k = self.sfx[k]
else:
if method == 'inc':
if I.shape[0] == 1:
suffix_candidate = self.trn[k][I[0]]
else:
suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]]
break
elif method == 'complete':
suffix_candidate.append((self.trn[k][I[np.argmin(dvec[I])]],
np.min(dvec)))
else:
suffix_candidate = self.trn[k][I[np.argmin(dvec[I])]]
break
if method == 'complete':
k = self.sfx[k]
if method == 'complete':
if not suffix_candidate:
self.sfx[i] = 0
self.lrs[i] = 0
self.latent.append([i])
self.data.append(len(self.latent) - 1)
else:
sorted_suffix_candidates = sorted(suffix_candidate,
key=lambda suffix: suffix[1])
self.sfx[i] = sorted_suffix_candidates[0][0]
self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
self.latent[self.data[self.sfx[i]]].append(i)
self.data.append(self.data[self.sfx[i]])
else:
if k is None:
self.sfx[i] = 0
self.lrs[i] = 0
self.latent.append([i])
self.data.append(len(self.latent) - 1)
else:
self.sfx[i] = suffix_candidate
self.lrs[i] = self._len_common_suffix(pi_1, self.sfx[i] - 1) + 1
self.latent[self.data[self.sfx[i]]].append(i)
self.data.append(self.data[self.sfx[i]])
# Temporary adjustment
k = self._find_better(i, self.data[i - self.lrs[i]])
if k is not None:
self.lrs[i] += 1
self.sfx[i] = k
self.rsfx[self.sfx[i]].append(i)
if self.lrs[i] > self.max_lrs[i - 1]:
self.max_lrs.append(self.lrs[i])
else:
self.max_lrs.append(self.max_lrs[i - 1])
self.avg_lrs.append(self.avg_lrs[i - 1] * ((i - 1.0) / (self.n_states - 1.0)) +
self.lrs[i] * (1.0 / (self.n_states - 1.0)))
class feature_array:
def __init__(self, dim):
self.data = np.zeros((100, dim))
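        self.dim = dim
        self.capacity = 100
        self.length = 0

    # --- Hedged completion (the original file is truncated here): MO above
    # only needs add() and index access on f_array, so a minimal growable
    # buffer is sketched below; the real class may differ. ---
    def add(self, new_data):
        if self.length >= self.capacity:
            self.capacity *= 2
            grown = np.zeros((self.capacity, self.dim))
            grown[:self.length] = self.data[:self.length]
            self.data = grown
        self.data[self.length] = new_data
        self.length += 1

    def __getitem__(self, ind):
        return self.data[ind]

# --- Hedged usage sketch (not part of the original module) ---
# Building a small variable Markov oracle over 1-D features; the threshold
# and feature values are made up for illustration.
if __name__ == '__main__':
    mo = MO(threshold=0.1, dfunc='euclidean', dim=1)
    for v in [0.0, 1.0, 0.0, 1.0, 1.05]:
        mo.add_state(np.array([v]))
    print(mo.data)   # cluster label per state
    print(mo.sfx)    # suffix links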
### Non dimensional parameters vs. metrics
from math import *
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import pylab as pl
import scipy.io
import scipy as spy
import seaborn as sns
import sys
#lib_path = os.path.abspath('../../Building_canyon/BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
lib_path = os.path.abspath('../BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
sys.path.append(lib_path)
import ReadOutTools_MITgcm as rout
import MetricsPythonTools as mpt
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------
def get_metrics(exp, run, TrNum, key):
'''Get column from a tracer metrics pandas dataframe using the key name, run (01, 02, etc) and experiment
abbreviated name (BAR, CNTDIFF, 3DDIFF, NOGMREDI). All input variables are strings. Returns the desired column from the dataframe'''
df = pd.read_csv(('results/metricsDataFrames/%srun%sTr%s.csv' %(exp,run,TrNum)))
col = df[key]
return col
def get_water(exp, run, key):
'''Get column from a water-mass metrics pandas dataframe using the key name, run (01, 02, etc) and experiment
abbreviated name (BAR, CNTDIFF, 3DDIFF, NOGMREDI). All input variables are strings. Returns the desired column from the dataframe'''
df = pd.read_csv(('results/metricsDataFrames/%srun%s.csv' %(exp,run)))
col = df[key]
return col
def get_areas(file, key):
'''Get column from an areas/volumes pandas dataframe given the csv file path and the
column key name. Both input variables are strings. Returns the desired column from the dataframe'''
df = pd.read_csv(file)
col = df[key]
return col
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------
sns.set()
sns.set_style('darkgrid')
sns.set_context('poster')
#Exp
CGrid = '/Users/Karina/Research/PhD/Tracers/TemporaryData/BARKLEY/run01/gridGlob.nc' # Smallest volume grid, closed bdy, no canyon.
#CGrid = '/ocean/kramosmu/MITgcm/TracerExperiments/CNTDIFF/run03/gridGlob.nc' # Smallest volume grid, closed bdy, no canyon.
CGridOut = Dataset(CGrid)
# General input
nx = 360
ny = 360
nz = 90
nt = 19 # t dimension size
numTr = 24 # number of tracers in total (CNT =20, 3D = 4)
rc = CGridOut.variables['RC']
xc = rout.getField(CGrid, 'XC') # x coords tracer cells
yc = rout.getField(CGrid, 'YC') # y coords tracer cells
drF = CGridOut.variables['drF'] # vertical distance between faces
drC = CGridOut.variables['drC'] # vertical distance between centers
labels = ['$K_v=10^{-7}$(out), $10^{-3}$(in), $K_i=1 m^2s^{-1}$','Kv=1E-7(out), 1E-4(in), Ki=1','Kv=1E-5(out), 1E-3(in), Ki=1',
'Kv=1E-5(out), 1E-4(in), Ki=1','Kv=1E-5, Ki=1','Kv=1E-4, Ki=1','Kv=1E-3, Ki=1','Kv=3.8E-5, Ki=10',
'Kv=2.8E-5, Ki=10','Kv=1.3E-5, Ki=10','Kv_noc=1E-5, Ki=1','Kv_noc=1E-4, Ki=1','Kv_noc=1E-3, Ki=1',
'Kv=1E-5, Ki=10','Kv=1E-4, Ki=10','Kv=1E-3, Ki=10','Kv=1E-5, Ki=0.1','Kv=1E-4, Ki=0.1',
'Kv=1E-3, Ki=0.1','Kv=3.8E-5, Ki=1','Kv=2.8E-5, Ki=1','Kv=1.3E-5, Ki=1','Kv=1E-4, Ki=1, Kt=Ks','Kv=1E-3, Ki=1,Kt=Ks']
wlabels = ['run04 - 3D','run05 - 3D','run06 - 3D','run07 - 3D','run02 - CNT','run03 - CNT','run04 - CNT',
'run07 - CNT','run09 - CNT','run10 - CNT','run11 - CNT','run12 - CNT']
times = np.arange(0,nt,1)
# LOAD AREAS
CS1A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS1area' )
CS2A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS2area' )
CS3A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS3area' )
CS3sbA = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS3sbarea' )
CS4A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS4area' )
CS5A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'CS5area' )
AS1A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'AS1area' )
AS2A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'AS2area' )
LID1A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'LID1area' )
LID2A = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'LID2area' )
VolHole = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'VolHole' )
VolShNoHole = get_areas('results/metricsDataFrames/Canyon_AreasVolumes.csv', 'VolShNoHole' )
tracers_3D = ['04','05','06','07'] #run number because there's only 1 tr per run
tracers_CNT03 = ['1','2','3'] # tracer number , constant runs
tracers_CNT09 = ['1','2','3'] # tracer number , average diffusivity runs
tracers_CNT07 = ['1','2','3'] # tracer number , no canyon case
tracers_CNT02 = ['1','2','3'] # tracer number , Kiso=0.1
tracers_CNT04 = ['1','2','3'] # tracer number , Kiso=10
tracers_CNT10 = ['1','2','3'] # tracer number , Kiso=1
tracers_CNT11 = ['2'] # tracer number , Kiso=1, Ks=Kt=10^4
tracers_CNT12 = ['3'] # tracer number , Kiso=1, Ks=Kt=10^3
# LOAD TRACER ON SHELF DATA
TrOnSh = np.zeros((nt,numTr))
HWC = np.zeros((nt,numTr))
kk = 0
fields = ['TronShelfwHole', 'HCWonShelfwHole','TronHole','HCWonHole']
for ii in tracers_3D:
TrOnShwHole = get_metrics('3DDIFF_hole_', ii, '1', fields[0] )
TrOnHole = get_metrics('3DDIFF_hole_', ii, '1', fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('3DDIFF_hole_', ii, '1', fields[1] )
HWChole = get_metrics('3DDIFF_hole_', ii, '1', fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT03:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '03',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '03', ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '03', ii,fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '03',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT09:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '09',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_','09',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '09',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '09',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT07:
TrSh = get_metrics('CNTDIFF_hole_', '07', ii, fields[0] )
TrHole= get_metrics('CNTDIFF_hole_', '07', ii, fields[2] )
HWCSh = get_metrics('CNTDIFF_hole_', '07', ii, fields[1] )
HWCHole = get_metrics('CNTDIFF_hole_', '07', ii, fields[3] )
TrOnSh[:,kk] = TrHole
HWC[:,kk] = HWCHole
kk=kk+1
for ii in tracers_CNT02:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '02',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '02',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '02',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '02', ii,fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT04:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '04',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '04',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '04',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '04',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT10:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '10',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '10',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '10',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '10',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT11:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '11',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '11',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '11',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '11',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
for ii in tracers_CNT12:
TrOnShwHole = get_metrics('CNTDIFF_hole_', '12',ii, fields[0] )
TrOnHole = get_metrics('CNTDIFF_hole_', '12',ii, fields[2] )
TrOnSh[:,kk] = TrOnHole
HWCsh = get_metrics('CNTDIFF_hole_', '12',ii, fields[1] )
HWChole = get_metrics('CNTDIFF_hole_', '12',ii, fields[3] )
HWC[:,kk] = HWChole
kk=kk+1
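# --- Hedged refactor sketch (not part of the original script) ---
# The copy-pasted loops above differ only in experiment prefix, run number
# and tracer list, so (assuming the same csv layout) they could be driven
# from one table of (exp, run, tracer) tuples, e.g.:
def load_all_tracers(groups, nt):
    '''groups: list of (exp, run, tracer) string tuples, in column order'''
    tr_on_hole = np.zeros((nt, len(groups)))
    hcw_on_hole = np.zeros((nt, len(groups)))
    for kk, (exp, run, tr) in enumerate(groups):
        tr_on_hole[:, kk] = get_metrics(exp, run, tr, 'TronHole')
        hcw_on_hole[:, kk] = get_metrics(exp, run, tr, 'HCWonHole')
    return tr_on_hole, hcw_on_hole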
# LOAD TRANSPORTS
CS1 = np.zeros((nt-1,numTr))
CS2 = np.zeros((nt-1,numTr))
CS3 = np.zeros((nt-1,numTr))
CS4 = np.zeros((nt-1,numTr))
CS5 = np.zeros((nt-1,numTr))
CS3sb = np.zeros((nt-1,numTr))
AS1 = np.zeros((nt-1,numTr))
AS2 = np.zeros((nt-1,numTr))
LID1 = np.zeros((nt-1,numTr))
LID2 = np.zeros((nt-1,numTr))
CS1a = np.zeros((nt-1,numTr))
CS2a = np.zeros((nt-1,numTr))
CS3a = np.zeros((nt-1,numTr))
CS4a = np.zeros((nt-1,numTr))
CS5a = np.zeros((nt-1,numTr))
CS3sba = np.zeros((nt-1,numTr))
AS1a = np.zeros((nt-1,numTr))
AS2a = np.zeros((nt-1,numTr))
LID1a = np.zeros((nt-1,numTr))
LID2a = np.zeros((nt-1,numTr))
CS1d = np.zeros((nt-1,numTr))
CS2d = np.zeros((nt-1,numTr))
CS3d = np.zeros((nt-1,numTr))
CS4d = np.zeros((nt-1,numTr))
CS5d = np.zeros((nt-1,numTr))
CS3sbd = np.zeros((nt-1,numTr))
AS1d = | np.zeros((nt-1,numTr)) | numpy.zeros |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
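# --- Hedged usage sketch (not part of the generated data) ---
# Symmetry-equivalent reflections under the inversion of P -1: (h,k,l)
# maps to itself and to (-h,-k,-l), each with unit phase factor here
# because both translations are zero.
if __name__ == '__main__':
    hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
        N.array([1, 2, 3]))
    print(hkls)     # [[ 1  2  3], [-1 -2 -3]]
    print(phases)   # [1.+0.j, 1.+0.j]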
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
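# Minimal usage sketch (kept as a comment so import-time behaviour is
# unchanged; it assumes SpaceGroup exposes the list passed to it as an
# attribute named `transformations`, which is not guaranteed here):
#
#   rot, num, den = space_groups['P 21 21 21'].transformations[1]
#   image = N.dot(rot, point) + 1.0 * num / den   # x' = R.x + t
#
# Space group 20, C 2 2 21: operations 5-8 repeat operations 1-4 shifted
# by the C-centring vector (1/2, 1/2, 0).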
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
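# Space group 21, C 2 2 2: the P 2 2 2 operations plus their C-centred
# copies at +(1/2, 1/2, 0).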
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
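# Space group 22, F 2 2 2: face centring quadruples the base 222 set,
# adding copies at +(0,1/2,1/2), +(1/2,0,1/2) and +(1/2,1/2,0).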
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
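# Space group 23, I 2 2 2: body centring adds copies at +(1/2, 1/2, 1/2).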
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
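# Space group 24, I 21 21 21: note that the centred translations are kept
# unreduced, so whole-cell components such as 1/1 appear instead of being
# wrapped modulo 1.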
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
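# Space group 25, P m m 2, opens the polar mm2 series (groups 25-46): a
# 2-fold along c with two mirrors; the following groups replace mirrors by
# glide planes, visible below as half-cell translations.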
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
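# Groups 35-46 are the centred mm2 settings (C, A, F and I lattices),
# starting here with space group 35, C m m 2.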
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
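# Space group 43, F d d 2: the d (diamond) glides carry quarter-cell
# translations, hence the denominators of 4 below.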
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
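# Space group 47, P m m m, opens the centrosymmetric mmm series: operations
# 5-8 are operations 1-4 composed with the inversion -I. As a sketch
# (hypothetical helper, not part of this module), the second half of each
# list could be generated as
#
#   inverted = [(-rot, -num, den) for rot, num, den in transformations[:4]]
#
# since inverting x' = R.x + t gives (-R, -t).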
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
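# Space group 48, 'P n n n :2': the ':2' suffix marks origin choice 2 of
# the International Tables; the inversion-related operations therefore
# carry negated translations such as (0, -1/2, -1/2).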
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
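# Space group 61, P b c a: three perpendicular glide planes; as in the
# other centrosymmetric groups, operations 5-8 are operations 1-4 with
# rotation and translation negated.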
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
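# C m c m is the first centred group in this run: the generator repeats the
# point operations once per centring translation, so the block above lists
# its eight point operations followed by the same eight with the C-centring
# vector (1/2, 1/2, 0) added to each numerator/denominator pair.  The sums
# are stored unreduced, so negative components and components of 1 or more
# appear in later blocks; (N.asarray(trans_num, float) / trans_den) % 1.0
# maps any of them back into the unit cell.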
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
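# The face-centred group F m m m follows the same scheme with four blocks of
# eight: the point operations with zero centring, then one block for each of
# the face-centring translations (0, 1/2, 1/2), (1/2, 0, 1/2) and
# (1/2, 1/2, 0), for 32 stored operations in total.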
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
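# F d d d (origin choice 2) is the first group in this run whose glides
# introduce denominators of 4: quarter translations such as (0, 1/4, 1/4)
# combine with the three face centrings to give entries like (0, 3/4, 3/4)
# and (1/4, 1/2, 3/4) above.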
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
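# Body-centred groups such as I m m m use the single centring vector
# (1/2, 1/2, 1/2): each lists its eight point operations followed by the same
# eight shifted by (1/2, 1/2, 1/2).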
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
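# From P 4 onwards the groups are tetragonal: besides the diagonal twofold
# matrices, the rot arrays now include the fourfold rotation
# [0,-1,0, 1,0,0, 0,0,1], i.e. (x, y, z) -> (-y, x, z), and its inverse
# [0,1,0, -1,0,0, 0,0,1].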
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
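# P 41, P 42 and P 43 above share the same four rotation matrices and differ
# only in the z component of the screw translations: 1/4, 1/2 and 3/4 for the
# 4+ operation, respectively.  A small consistency sketch (illustrative, again
# assuming N is the module's numpy alias): compose two stored operations and
# compare with a third modulo whole lattice translations.
def _composes_to(op_a, op_b, op_c):
    # True if op_a followed by op_b reproduces op_c up to a lattice vector.
    rot = N.dot(op_b[0], op_a[0])
    trans = N.dot(op_b[0], N.asarray(op_a[1], float) / op_a[2]) \
            + N.asarray(op_b[1], float) / op_b[2]
    trans_c = N.asarray(op_c[1], float) / op_c[2]
    return (rot == op_c[0]).all() and (N.abs((trans - trans_c) % 1.0) < 1e-10).all()
# For the P 43 list just built, applying the 4+ screw twice yields the twofold
# screw: _composes_to(transformations[1], transformations[1], transformations[3])
# is True, since (0, 0, 3/4) doubled is (0, 0, 3/2), i.e. (0, 0, 1/2) plus a
# whole lattice translation along z.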
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
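# The ':2' settings in this table use origin choice 2, which puts the
# inversion centre at the origin: in P 4/n :2 above (and P 42/n :2 below) the
# inversion is stored with zero translation, and each improper operation
# carries exactly the negated numerators of its proper counterpart.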
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
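# Space group 88: I 41/a (origin choice 2), 16 ops; translations are kept
# unreduced, so components such as 5/4 are equivalent to 1/4 modulo the
# lattice.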
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
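# Space group 89: P 4 2 2, 8 ops, all with zero translation.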
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
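# Space group 90: P 4 21 2, 8 ops.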
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
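# Space group 91: P 41 2 2, 8 ops; 41 screw axis (c/4 and 3c/4 translations).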
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
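# Space group 92: P 41 21 2, 8 ops.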
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
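# Space group 93: P 42 2 2, 8 ops.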
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
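# Space group 94: P 42 21 2, 8 ops.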
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
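# Space group 95: P 43 2 2, 8 ops; enantiomorph of P 41 2 2.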
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
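# Space group 96: P 43 21 2, 8 ops.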
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
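# Space group 97: I 4 2 2, 16 ops (I-centred).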
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
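# Space group 98: I 41 2 2, 16 ops (I-centred).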
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
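# Space group 99: P 4 m m, 8 ops.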
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
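# Space group 100: P 4 b m, 8 ops; glide planes carry translation (1/2,1/2,0).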
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
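# Space group 101: P 42 c m, 8 ops.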
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
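# Space group 102: P 42 n m, 8 ops.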
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
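# Space group 103: P 4 c c, 8 ops; c-glides carry translation (0,0,1/2).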
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
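# Space group 104: P 4 n c, 8 ops.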
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
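# Space group 105: P 42 m c, 8 ops.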
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
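# Space group 106: P 42 b c, 8 ops.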
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
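# Space group 107: I 4 m m, 16 ops (I-centred).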
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
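# Space group 108: I 4 c m, 16 ops (I-centred).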
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
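# Space group 109: I 41 m d, 16 ops (I-centred).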
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
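# Space group 110: I 41 c d, 16 ops (I-centred).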
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
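# Space group 111: P -4 2 m, 8 ops.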
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
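# Space group 112: P -4 2 c, 8 ops.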
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
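# Space group 113: P -4 21 m, 8 ops.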
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
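# Space group 114: P -4 21 c, 8 ops.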
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
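# Space group 115: P -4 m 2, 8 ops.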
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
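# Space group 116: P -4 c 2, 8 ops.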
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
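# Space group 117: P -4 b 2, 8 ops.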
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
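# Space group 118: P -4 n 2, 8 ops.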
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
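# Space group 119: I -4 m 2, 16 ops (I-centred).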
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
import numpy as np
import pandas as pd
import xarray as xr
from scipy.integrate import quad
import scipy.interpolate as spi
import pf_static_sph
from scipy import interpolate
from timeit import default_timer as timer
import mpmath as mpm
# ---- HELPER FUNCTIONS ----
def kcos_func(kgrid):
#
names = list(kgrid.arrays.keys())
functions_kcos = [lambda k: k, np.cos]
return kgrid.function_prod(names, functions_kcos)
def kpow2_func(kgrid):
#
names = list(kgrid.arrays.keys())
functions_kpow2 = [lambda k: k**2, lambda th: 0 * th + 1]
return kgrid.function_prod(names, functions_kpow2)
# ---- BASIC FUNCTIONS ----
def ur(mI, mB):
return (mB * mI) / (mB + mI)
def nu(mB, n0, gBB):
return np.sqrt(n0 * gBB / mB)
def epsilon(k, mB):
return k**2 / (2 * mB)
def omegak(k, mB, n0, gBB):
ep = epsilon(k, mB)
return np.sqrt(ep * (ep + 2 * gBB * n0))
def omegak_grid(kgrid, mB, n0, gBB):
names = list(kgrid.arrays.keys())
functions_Wk = [lambda k: omegak(k, mB, n0, gBB), lambda th: 0 * th + 1]
return kgrid.function_prod(names, functions_Wk)
def Omega(kgrid, DP, mI, mB, n0, gBB):
names = list(kgrid.arrays.keys()) # ***need to have arrays added as k, th when kgrid is created
if names[0] != 'k':
print('CREATED kgrid IN WRONG ORDER')
functions_omega0 = [lambda k: omegak(k, mB, n0, gBB) + (k**2 / (2 * mI)), lambda th: 0 * th + 1]
omega0 = kgrid.function_prod(names, functions_omega0)
return omega0 - kcos_func(kgrid) * DP / mI
def Wk(kgrid, mB, n0, gBB):
names = list(kgrid.arrays.keys())
functions_Wk = [lambda k: np.sqrt(epsilon(k, mB) / omegak(k, mB, n0, gBB)), lambda th: 0 * th + 1]
return kgrid.function_prod(names, functions_Wk)
def g(kgrid, aIBi, mI, mB, n0, gBB):
    # gives the bare interaction strength (coupling constant) for this grid's momentum cutoff
k_max = kgrid.getArray('k')[-1]
mR = ur(mI, mB)
return 1 / ((mR / (2 * np.pi)) * aIBi - (mR / np.pi**2) * k_max)
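# Note: this inverts the cutoff-regularized relation between the bare coupling
# and the inverse scattering length, 1/g = (mR/(2*pi))*aIBi - (mR/pi**2)*k_max,
# so the physical scattering length is held fixed while g depends on the
# grid's momentum cutoff k_max.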
# ---- SPECTRUM RELATED FUNCTIONS ----
# def PCrit_inf(kcutoff, aIBi, mI, mB, n0, gBB):
# #
# DP = mI * nu(mB, n0, gBB) # condition for critical momentum is P-PB = mI*nu where nu is the speed of sound
# # non-grid helper function
# def Wk(k, gBB, mB, n0):
# return np.sqrt(eB(k, mB) / w(k, gBB, mB, n0))
# # calculate aSi
# def integrand(k): return (4 * ur(mI, mB) / (k**2) - ((Wk(k, gBB, mB, n0)**2) / (DP * k / mI)) * np.log((w(k, gBB, mB, n0) + (k**2) / (2 * mI) + (DP * k / mI)) / (w(k, gBB, mB, n0) + (k**2) / (2 * mI) - (DP * k / mI)))) * (k**2)
# val, abserr = quad(integrand, 0, kcutoff, epsabs=0, epsrel=1.49e-12)
# aSi = (1 / (2 * np.pi * ur(mI, mB))) * val
# # calculate PB (phonon momentum)
# def integrand(k): return ((2 * (w(k, gBB, mB, n0) + (k**2) / (2 * mI)) * (DP * k / mI) + (w(k, gBB, mB, n0) + (k**2) / (2 * mI) - (DP * k / mI)) * (w(k, gBB, mB, n0) + (k**2) / (2 * mI) + (DP * k / mI)) * np.log((w(k, gBB, mB, n0) + (k**2) / (2 * mI) - (DP * k / mI)) / (w(k, gBB, mB, n0) + (k**2) / (2 * mI) + (DP * k / mI)))) / ((w(k, gBB, mB, n0) + (k**2) / (2 * mI) - (DP * k / mI)) * (w(k, gBB, mB, n0) + (k**2) / (2 * mI) + (DP * k / mI)) * (DP * k / mI)**2)) * (Wk(k, gBB, mB, n0)**2) * (k**3)
# val, abserr = quad(integrand, 0, kcutoff, epsabs=0, epsrel=1.49e-12)
# PB = n0 / (ur(mI, mB)**2 * (aIBi - aSi)**2) * val
# return DP + PB
def dirRF(dataset, kgrid, cParams, sParams):
CSAmp = dataset['Real_CSAmp'] + 1j * dataset['Imag_CSAmp']
Phase = dataset['Phase']
dVk = kgrid.dV()
tgrid = CSAmp.coords['t'].values
CSA0 = CSAmp.isel(t=0).values; CSA0 = CSA0.reshape(CSA0.size)
Phase0 = Phase.isel(t=0).values
DynOv_Vec = np.zeros(tgrid.size, dtype=complex)
for tind, t in enumerate(tgrid):
CSAt = CSAmp.sel(t=t).values; CSAt = CSAt.reshape(CSAt.size)
Phaset = Phase.sel(t=t).values
exparg = np.dot(np.abs(CSAt)**2 + np.abs(CSA0)**2 - 2 * CSA0.conjugate() * CSAt, dVk)
DynOv_Vec[tind] = np.exp(-1j * (Phaset - Phase0)) * np.exp((-1 / 2) * exparg)
# calculate polaron energy (energy of initial state CSA0)
[P, aIBi] = cParams
[mI, mB, n0, gBB] = sParams
dVk = kgrid.dV()
kzg_flat = kcos_func(kgrid)
gIB = g(kgrid, aIBi, mI, mB, n0, gBB)
PB0 = np.dot(kzg_flat * np.abs(CSA0)**2, dVk).real.astype(float)
DP0 = P - PB0
Energy0 = (P**2 - PB0**2) / (2 * mI) + np.dot(Omega(kgrid, DP0, mI, mB, n0, gBB) * np.abs(CSA0)**2, dVk) + gIB * (np.dot(Wk(kgrid, mB, n0, gBB) * CSA0, dVk) + np.sqrt(n0))**2
# calculate full dynamical overlap
DynOv_Vec = np.exp(1j * Energy0) * DynOv_Vec
ReDynOv_da = xr.DataArray(np.real(DynOv_Vec), coords=[tgrid], dims=['t'])
ImDynOv_da = xr.DataArray(np.imag(DynOv_Vec), coords=[tgrid], dims=['t'])
# DynOv_ds = xr.Dataset({'Real_DynOv': ReDynOv_da, 'Imag_DynOv': ImDynOv_da}, coords={'t': tgrid}, attrs=dataset.attrs)
DynOv_ds = dataset[['Real_CSAmp', 'Imag_CSAmp', 'Phase']]; DynOv_ds['Real_DynOv'] = ReDynOv_da; DynOv_ds['Imag_DynOv'] = ImDynOv_da; DynOv_ds.attrs = dataset.attrs
return DynOv_ds
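# Hedged usage sketch (not from this file; the file name and variables here
# are illustrative): compute the dynamical overlap S(t) from a saved run and
# then its spectral function with spectFunc below.
# ds = xr.open_dataset('quench_run.nc')
# DynOv_ds = dirRF(ds, kgrid, [P, aIBi], [mI, mB, n0, gBB])
# St = DynOv_ds['Real_DynOv'].values + 1j * DynOv_ds['Imag_DynOv'].values
# omega, sf = spectFunc(DynOv_ds['t'].values, St, 50)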
# def spectFunc(t_Vec, S_Vec, tdecay):
# # spectral function (Fourier Transform of dynamical overlap) using convention A(omega) = 2*Re[\int {S(t)*e^(-i*omega*t)}]
# dt = t_Vec[1] - t_Vec[0]
# Nt = t_Vec.size
# decayFactor = np.exp(-1 * t_Vec / tdecay)
# Sarg = S_Vec * decayFactor
# sf_preshift = 2 * np.real(dt * np.fft.fft(Sarg))
# sf = np.fft.fftshift(sf_preshift)
# omega = np.fft.fftshift((2 * np.pi / dt) * np.fft.fftfreq(Nt))
# return omega, sf
def spectFunc(t_Vec, S_Vec, tdecay):
# spectral function (Fourier Transform of dynamical overlap) using convention A(omega) = 2*Re[\int {S(t)*e^(i*omega*t)}]
dt = t_Vec[1] - t_Vec[0]
Nt = t_Vec.size
domega = 2 * np.pi / (Nt * dt)
decayFactor = np.exp(-1 * t_Vec / tdecay)
Sarg = S_Vec * decayFactor
sf_preshift = np.real((2 * np.pi / domega) * np.fft.ifft(Sarg))
# sf_preshift = 2 * np.real((2 * np.pi / domega) * np.fft.ifft(Sarg))
sf = np.fft.fftshift(sf_preshift)
omega = np.fft.fftshift((2 * np.pi / dt) * np.fft.fftfreq(Nt))
return omega, sf
def Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB):
dVk = kgrid.dV()
kzg_flat = kcos_func(kgrid)
Wk_grid = Wk(kgrid, mB, n0, gBB)
Wki_grid = 1 / Wk_grid
amplitude = CSAmp.reshape(CSAmp.size)
PB = np.dot(kzg_flat * np.abs(amplitude)**2, dVk).real.astype(float)
DP = P - PB
Omega_grid = Omega(kgrid, DP, mI, mB, n0, gBB)
gnum = g(kgrid, aIBi, mI, mB, n0, gBB)
xp = 0.5 * np.dot(Wk_grid, amplitude * dVk)
xm = 0.5 * np.dot(Wki_grid, amplitude * dVk)
En = ((P**2 - PB**2) / (2 * mI) +
np.dot(dVk * Omega_grid, np.abs(amplitude)**2) +
          gnum * (2 * np.real(xp) + np.sqrt(n0))**2)
    return En.real.astype(float)
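# Hedged smoke test (not in the original source): checks the Fourier
# convention of spectFunc on a pure phase evolution S(t) = exp(-1j*w0*t),
# whose spectral function should peak near omega = w0.
if __name__ == '__main__':
    t_Vec = np.linspace(0, 200, 2**12)
    w0 = 1.5
    S_Vec = np.exp(-1j * w0 * t_Vec)
    omega, sf = spectFunc(t_Vec, S_Vec, 50)
    print('peak at omega = {:.3f} (expect ~{:.1f})'.format(omega[np.argmax(sf)], w0))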
"""
"""
import gzip
import pickle
import astropy.units as u
import numpy as np
from template_builder.utilities import *
from template_builder.extend_templates import *
from ctapipe.coordinates import CameraFrame, NominalFrame, GroundFrame, \
TiltedGroundFrame
from astropy.coordinates import SkyCoord, AltAz
from astropy.time import Time
from ctapipe.io import EventSource
from ctapipe.reco import ImPACTReconstructor
from tqdm import tqdm
from ctapipe.image import tailcuts_clean, dilate
from ctapipe.calib import CameraCalibrator
from ctapipe.image.extractor import FullWaveformSum, FixedWindowSum
from ctapipe.calib.camera.gainselection import ThresholdGainSelector
class TemplateFitter:
def __init__(self, eff_fl=1,
bounds=((-5, 1), (-1.5, 1.5)),
bins=(601, 301),
min_fit_pixels=3000,
xmax_bins=np.linspace(-150, 200, 15),
offset_bins=np.array([0.0])*u.deg,
verbose=False,
rotation_angle=0 * u.deg,
tailcuts=(7, 14), min_amp=30, local_distance_cut=2.*u.deg,
gain_threshold=30000):
"""[summary]
Args:
eff_fl (int, optional): [description]. Defaults to 1.
bounds (tuple, optional): [description]. Defaults to ((-5, 1), (-1.5, 1.5)).
bins (tuple, optional): [description]. Defaults to (601, 301).
min_fit_pixels (int, optional): [description]. Defaults to 3000.
xmax_bins ([type], optional): [description]. Defaults to np.linspace(-150, 200, 15).
maximum_offset ([type], optional): [description]. Defaults to 10*u.deg.
verbose (bool, optional): [description]. Defaults to False.
rotation_angle ([type], optional): [description]. Defaults to 0*u.deg.
tailcuts (tuple, optional): [description]. Defaults to (7, 14).
min_amp (int, optional): [description]. Defaults to 30.
local_distance_cut ([type], optional): [description]. Defaults to 2.*u.deg.
gain_threshold (int, optional): [description]. Defaults to 30000.
"""
self.verbose = verbose
self.xmax_bins = xmax_bins
self.eff_fl = eff_fl
self.bounds = bounds
self.bins = bins
self.min_fit_pixels = min_fit_pixels
self.rotation_angle = rotation_angle
self.offset_bins = np.sort(offset_bins)
self.tailcuts = tailcuts
self.min_amp = min_amp
self.local_distance_cut = local_distance_cut
self.templates = dict() # Pixel amplitude
self.template_fit = dict() # Pixel amplitude
self.template_fit_kde = dict() # Pixel amplitude
self.templates_xb = dict() # Rotated X position
self.templates_yb = dict() # Rotated Y positions
self.correction = dict()
self.count = dict() # Count of events in a given template
self.count_total = 0 # Total number of events
self.gain_threshold = gain_threshold
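    # Hedged usage sketch (the file name is illustrative, not from this repo):
    # fitter = TemplateFitter(eff_fl=1.0, verbose=True,
    #                         offset_bins=np.array([0.0]) * u.deg)
    # fitter.read_templates('gamma_20deg_0deg_run1.simtel.gz')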
def read_templates(self, filename, max_events=1000000):
"""
This is a pretty standard ctapipe event loop that calibrates events, rotates
them into a common frame and then stores the pixel values in a list
:param filename: str
Location of input
:param max_events: int
Maximum number of events to include in the loop
:return: tuple
Return 3 lists of amplitude and rotated x,y positions of all pixels in all
events
"""
# Create dictionaries to contain our output
if max_events > 0:
print("Warning if limiting event numbers the zero fraction may no longer be correct")
else:
max_events = 1e10
# Create a dummy time for our AltAz objects
dummy_time = Time('2010-01-01T00:00:00', format='isot', scale='utc')
if self.verbose:
print("Reading", filename.strip())
source = EventSource(filename, max_events=max_events, gain_selector_type='ThresholdGainSelector')
source.gain_selector.threshold = self.gain_threshold  # Set our threshold for gain selection
# This value is currently set for HESS, need to make this more flexible in future
calib = CameraCalibrator(subarray=source.subarray, image_extractor=FixedWindowSum(source.subarray,
window_width=16, window_shift=3, peak_index=3,
apply_integration_correction=False))
self.count_total += source.simulation_config.num_showers
grd_tel = None
num = 0 # Event counter
scaling_filled = False
for event in source:
calib(event)
alt = event.pointing.array_altitude
if alt > 90 * u.deg:
alt = 90*u.deg
point = SkyCoord(alt=alt, az=event.pointing.array_azimuth,
frame=AltAz(obstime=dummy_time))
if not scaling_filled:
xmax_scale = create_xmax_scaling(self.xmax_bins, self.offset_bins, point, filename)
scaling_filled = True
# Create coordinate objects for source position
src = SkyCoord(alt=event.simulation.shower.alt.value * u.rad,
az=event.simulation.shower.az.value * u.rad,
frame=AltAz(obstime=dummy_time))
alt_evt = event.simulation.shower.alt
if alt_evt > 90 * u.deg:
alt_evt = 90*u.deg
#print("here1", point.separation(src), self.maximum_offset)
#if point.separation(src) > self.maximum_offset:
# continue
offset_bin = find_nearest_bin(self.offset_bins, point.separation(src)).value
zen = 90 - event.simulation.shower.alt.to(u.deg).value
# Store simulated Xmax
mc_xmax = event.simulation.shower.x_max.value / np.cos(np.deg2rad(zen))
# And transform into nominal system (where we store our templates)
source_direction = src.transform_to(NominalFrame(origin=point))
# Store simulated event energy
energy = event.simulation.shower.energy
# Store ground position of all telescopes
# We only want to do this once, but has to be done in event loop
if grd_tel is None:
grd_tel = source.subarray.tel_coords
# Convert to tilted system
tilt_tel = grd_tel.transform_to(
TiltedGroundFrame(pointing_direction=point))
# Calculate core position in tilted system
grd_core_true = SkyCoord(x=np.asarray(event.simulation.shower.core_x) * u.m,
y=np.asarray(event.simulation.shower.core_y) * u.m,
z=np.asarray(0) * u.m, frame=GroundFrame())
tilt_core_true = grd_core_true.transform_to(TiltedGroundFrame(
pointing_direction=point))
# Loop over triggered telescopes
for tel_id, dl1 in event.dl1.tel.items():
# Get pixel signal
pmt_signal = dl1.image
# Get pixel coordinates and convert to the nominal system
geom = source.subarray.tel[tel_id].camera.geometry
fl = source.subarray.tel[tel_id].optics.equivalent_focal_length * \
self.eff_fl
camera_coord = SkyCoord(x=geom.pix_x, y=geom.pix_y,
frame=CameraFrame(focal_length=fl,
telescope_pointing=point))
nom_coord = camera_coord.transform_to(
NominalFrame(origin=point))
x = nom_coord.fov_lon.to(u.deg)
y = nom_coord.fov_lat.to(u.deg)
# Calculate expected rotation angle of the image
phi = np.arctan2((tilt_tel.y[tel_id - 1] - tilt_core_true.y),
(tilt_tel.x[tel_id - 1] - tilt_core_true.x)) + \
90 * u.deg
phi += self.rotation_angle
# And the impact distance of the shower
impact = np.sqrt(np.power(tilt_tel.x[tel_id - 1] - tilt_core_true.x, 2) +
np.power(tilt_tel.y[tel_id - 1] - tilt_core_true.y, 2)). \
to(u.m).value
# now rotate and translate our images such that they lie on top of one
# another
x, y = \
ImPACTReconstructor.rotate_translate(x, y,
source_direction.fov_lon,
source_direction.fov_lat,
phi)
x *= -1
# We only want to keep pixels that fall within the bounds of our
# final template
mask = np.logical_and(x > self.bounds[0][0] * u.deg,
x < self.bounds[0][1] * u.deg)
mask = np.logical_and(mask, y < self.bounds[1][1] * u.deg)
mask = np.logical_and(mask, y > self.bounds[1][0] * u.deg)
mask510 = tailcuts_clean(geom, pmt_signal,
picture_thresh=self.tailcuts[0],
boundary_thresh=self.tailcuts[1],
min_number_picture_neighbors=1)
amp_sum = np.sum(pmt_signal[mask510])
x_cent = np.sum(pmt_signal[mask510] * x[mask510]) / amp_sum
y_cent = np.sum(pmt_signal[mask510] * y[mask510]) / amp_sum
mask = mask510
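# Grow the cleaned mask outwards four times so faint pixels neighbouring the image are kept in the template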
for i in range(4):
mask = dilate(geom, mask)
# Make our preselection cuts: require enough total amplitude and an image
# centroid close enough to the camera centre
if amp_sum < self.min_amp or np.sqrt(x_cent**2 + y_cent**2) > self.local_distance_cut:
continue
# Make sure everything is 32 bit
x = x[mask].astype(np.float32)
y = y[mask].astype(np.float32)
image = pmt_signal[mask].astype(np.float32)
zen = 90 - alt_evt.to(u.deg).value
# Store simulated Xmax
mc_xmax = event.simulation.shower.x_max.value / np.cos(np.deg2rad(zen))
# Calc difference from expected Xmax (for gammas)
exp_xmax = xmax_expectation(energy.value)
x_diff = mc_xmax - exp_xmax
x_diff_bin = find_nearest_bin(self.xmax_bins, x_diff)
az = point.az.to(u.deg).value
zen = 90. - point.alt.to(u.deg).value
# Now fill up our output with the X, Y and amplitude of our pixels
key = zen, az, energy.value, int(impact), x_diff_bin, offset_bin
if key in self.templates:
# Extend the list if an entry already exists
self.templates[key].extend(image)
self.templates_xb[key].extend(x.to(u.deg).value)
self.templates_yb[key].extend(y.to(u.deg).value)
self.count[key] = self.count[key] + (1 * xmax_scale[(x_diff_bin, offset_bin)])
else:
self.templates[key] = image.tolist()
self.templates_xb[key] = x.value.tolist()
self.templates_yb[key] = y.value.tolist()
self.count[key] = 1 * xmax_scale[(x_diff_bin, offset_bin)]
if num > max_events:
return self.templates, self.templates_xb, self.templates_yb
num += 1
return self.templates, self.templates_xb, self.templates_yb
def fit_templates(self, amplitude, x_pos, y_pos,
make_variance_template, max_fitpoints):
"""
Perform MLP fit over a dictionary of pixel lists
:param amplitude: dict
Dictionary of pixel amplitudes for each template
:param x_pos: dict
Dictionary of x position for each template
:param y_pos: dict
Dictionary of y position for each template
:param make_variance_template: bool
Should we also make a template of variance
:param max_fitpoints: int
Maximum number of points to include in MLP fit
:return: dict
Dictionary of image templates
"""
if self.verbose:
print("Fitting Templates")
# Create output dictionary
templates_out = dict()
variance_templates_out = dict()
# Create grid over which to evaluate our fit
x = np.linspace(self.bounds[0][0], self.bounds[0][1], self.bins[0])
#!/usr/bin/python
import itertools
import numpy as np
import pytest
from scipy import stats
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.svm import SVC
GAMMA = 1.
COEF0 = 1.
def main():
# polynomial()
cross_validation()
# rbf()
def read_dataset(dataset_type):
dataset = np.loadtxt('features.' + dataset_type)
return dataset[:, 1:], dataset[:, 0] # X, y
def polynomial():
clf = SVC(C=.01, kernel='poly', degree=2, gamma=GAMMA, coef0=COEF0)
for offset in [0, 1]:
num_supports, E_ins = [], []
digits = np.array([0, 2, 4, 6, 8], dtype=float) + offset
for digit in digits:
X_training, y_training = read_dataset('train')
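# One-vs-all relabelling: keep the chosen digit's own label and map every other class to -1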
y_training[~np.isclose(y_training, digit)] = -1.
clf.fit(X_training, y_training)
E_ins.append(1 - clf.score(X_training, y_training))
num_supports.append(clf.n_support_.sum())
chosen_idx = np.argmax(E_ins) if offset == 0 else np.argmin(E_ins)
print('digit={}: E_in={}, num_supports={}'.format(digits[chosen_idx],
E_ins[chosen_idx], num_supports[chosen_idx]))
print('\n--------------------\n')
X_training, y_training = read_dataset('train')
one_or_five = np.isclose(y_training, 1.) | np.isclose(y_training, 5.)
'''
Author: Dr. <NAME> <<EMAIL>>
This package is distributed under New BSD license.
'''
from __future__ import division
import numpy as np
import scipy.sparse
from six.moves import range
from numbers import Integral
from smt.utils.linear_solvers import get_solver, LinearSolver, VALID_SOLVERS
from smt.utils.line_search import get_line_search_class, LineSearch, VALID_LINE_SEARCHES
from smt.utils.caching import cached_operation
from smt.surrogate_models.surrogate_model import SurrogateModel
class RMTS(SurrogateModel):
"""
Regularized Minimal-energy Tensor-product Spline interpolant base class for RMTC and RMTB.
"""
def _initialize(self):
super(RMTS, self)._initialize()
declare = self.options.declare
supports = self.supports
declare('xlimits', types=np.ndarray,
desc='Lower/upper bounds in each dimension - ndarray [nx, 2]')
declare('smoothness', 1.0, types=(Integral, float, tuple, list, np.ndarray),
desc='Smoothness parameter in each dimension - length nx. None implies uniform')
declare('regularization_weight', 1e-14, types=(Integral, float),
desc='Weight of the term penalizing the norm of the spline coefficients.' +
' This is useful as an alternative to energy minimization ' +
' when energy minimization makes the training time too long.')
declare('energy_weight', 1e-4, types=(Integral, float),
desc='The weight of the energy minimization terms')
declare('extrapolate', False, types=bool,
desc='Whether to perform linear extrapolation for external evaluation points')
declare('min_energy', True, types=bool,
desc='Whether to perform energy minimization')
declare('approx_order', 4, types=Integral,
desc='Exponent in the approximation term')
declare('solver', 'krylov', values=VALID_SOLVERS, types=LinearSolver,
desc='Linear solver')
declare('derivative_solver', 'krylov', values=VALID_SOLVERS, types=LinearSolver,
desc='Linear solver used for computing output derivatives (dy_dyt)')
declare('grad_weight', 0.5, types=(Integral, float),
desc='Weight on gradient training data')
declare('solver_tolerance', 1e-12, types=(Integral, float),
desc='Convergence tolerance for the nonlinear solver')
declare('nonlinear_maxiter', 10, types=Integral,
desc='Maximum number of nonlinear solver iterations')
declare('line_search', 'backtracking', values=VALID_LINE_SEARCHES, types=LineSearch,
desc='Line search algorithm')
declare('save_energy_terms', False, types=bool,
desc='Whether to cache energy terms in the data_dir directory')
declare('data_dir', None, values=(None,), types=str,
desc='Directory for loading / saving cached data; None means do not save or load')
declare('max_print_depth', 5, types=Integral,
desc='Maximum depth (level of nesting) to print operation descriptions and times')
supports['training_derivatives'] = True
supports['derivatives'] = True
supports['output_derivatives'] = True
def _setup_hessian(self):
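# The regularization Hessian is the identity over all degrees of freedom, penalizing the coefficient norm directly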
diag = np.ones(self.num['dof'])
arange = np.arange(self.num['dof'])
full_hess = scipy.sparse.csc_matrix((diag, (arange, arange)))
return full_hess
def _compute_jac(self, ix1, ix2, x):
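# Assemble the sparse Jacobian of the (ix1, ix2) basis derivatives at the points x,
# mapped into dof space when a dof-to-coefficient operator exists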
data, rows, cols = self._compute_jac_raw(ix1, ix2, x)
n = x.shape[0]
full_jac = scipy.sparse.csc_matrix((data, (rows, cols)), shape=(n, self.num['coeff']))
if self.full_dof2coeff is not None:
full_jac = full_jac * self.full_dof2coeff
return full_jac
def _compute_approx_terms(self):
# This computes the approximation terms for the training points.
# We loop over kx: 0 is for values and kx > 0 represents the 1-based index
# of the derivative given by the training point data.
num = self.num
xlimits = self.options['xlimits']
full_jac_dict = {}
for kx in self.training_points[None]:
xt, yt = self.training_points[None][kx]
xmin = np.min(xt, axis=0)
xmax = np.max(xt, axis=0)
assert np.all(xlimits[:, 0] <= xmin), 'Training points below min for %s' % kx
assert np.all(xlimits[:, 1] >= xmax), 'Training points above max for %s' % kx
if kx == 0:
c = 1.0
else:
c = self.options['grad_weight'] / xlimits.shape[0]
full_jac = self._compute_jac(kx, 0, xt)
full_jac_dict[kx] = (full_jac, full_jac.T.tocsc(), c)
return full_jac_dict
def _compute_energy_terms(self):
# This computes the energy terms that are to be minimized.
# The quadrature points are the centroids of the multi-dimensional elements.
num = self.num
xlimits = self.options['xlimits']
inputs = {}
inputs['nx'] = xlimits.shape[0]
inputs['elem_list'] = num['elem_list']
if self.__class__.__name__ == 'RMTB':
inputs['num_ctrl_list'] = num['ctrl_list']
inputs['order_list'] = num['order_list']
if self.options['save_energy_terms']:
cache_dir = self.options['data_dir']
else:
cache_dir = None
with cached_operation(inputs, cache_dir) as outputs:
if outputs:
sq_mtx = outputs['sq_mtx']
else:
n = np.prod(2 * num['elem_list'])
x = np.empty(n * num['x'])
self.rmtsc.compute_quadrature_points(
n, np.array(2 * num['elem_list'], dtype=np.int32), x)
x = x.reshape((n, num['x']))
sq_mtx = [None] * num['x']
for kx in range(num['x']):
mtx = self._compute_jac(kx+1, kx+1, x)
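# Scale by the domain length to the 4th power: the squared second derivative scales as
# 1/L**4, so this keeps each dimension's energy contribution comparable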
sq_mtx[kx] = mtx.T.tocsc() * mtx * (xlimits[kx, 1] - xlimits[kx, 0]) ** 4
outputs['sq_mtx'] = sq_mtx
elem_vol = np.prod((xlimits[:, 1] - xlimits[:, 0]) / (2 * num['elem_list']))
import numpy as np
import enum
from scipy.spatial.transform import Rotation
from scipy.spatial.distance import pdist, squareform
from casadi import *
from scipy.optimize import nnls
from rrc_iprl_package.control.contact_point import ContactPoint
from trifinger_simulation.tasks import move_cube
from rrc_iprl_package.traj_opt.fixed_contact_point_opt import FixedContactPointOpt
from rrc_iprl_package.traj_opt.fixed_contact_point_system import FixedContactPointSystem
from rrc_iprl_package.traj_opt.static_object_opt import StaticObjectOpt
class PolicyMode(enum.Enum):
RESET = enum.auto()
TRAJ_OPT = enum.auto()
IMPEDANCE = enum.auto()
RL_PUSH = enum.auto()
RESIDUAL = enum.auto()
# Object properties
OBJ_MASS = 0.016 # 16 grams
OBJ_SIZE = move_cube._CUBOID_SIZE
OBJ_SIZE_OFFSET = 0.012
OBJ_MU = 1
# Here, hard code the base position of the fingers (as angle on the arena)
r = 0.15
theta_0 = 80
theta_1 = 310
theta_2 = 200
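# Each finger base sits on a circle of radius r around the arena centre, at the
# polar angles above (degrees), converted to Cartesian coordinates below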
FINGER_BASE_POSITIONS = [
np.array([[np.cos(theta_0*(np.pi/180))*r, np.sin(theta_0*(np.pi/180))*r, 0]]),
np.array([[np.cos(theta_1*(np.pi/180))*r, np.sin(theta_1*(np.pi/180))*r, 0]]),
np.array([[np.cos(theta_2*(np.pi/180))*r, np.sin(theta_2*(np.pi/180))*r, 0]]),
]
BASE_ANGLE_DEGREES = [0, -120, -240]
# Information about object faces given face_id
OBJ_FACES_INFO = {
1: {"center_param": np.array([0.,-1.,0.]),
"face_down_default_quat": np.array([0.707,0,0,0.707]),
"adjacent_faces": [6,4,3,5],
"opposite_face": 2,
"up_axis": np.array([0.,1.,0.]), # UP axis when this face is ground face
},
2: {"center_param": np.array([0.,1.,0.]),
"face_down_default_quat": np.array([-0.707,0,0,0.707]),
"adjacent_faces": [6,4,3,5],
"opposite_face": 1,
"up_axis": np.array([0.,-1.,0.]),
},
3: {"center_param": np.array([1.,0.,0.]),
"face_down_default_quat": np.array([0,0.707,0,0.707]),
"adjacent_faces": [1,2,4,6],
"opposite_face": 5,
"up_axis": np.array([-1.,0.,0.]),
},
4: {"center_param": np.array([0.,0.,1.]),
"face_down_default_quat": np.array([0,1,0,0]),
"adjacent_faces": [1,2,3,5],
"opposite_face": 6,
"up_axis": np.array([0.,0.,-1.]),
},
5: {"center_param": np.array([-1.,0.,0.]),
"face_down_default_quat": np.array([0,-0.707,0,0.707]),
"adjacent_faces": [1,2,4,6],
"opposite_face": 3,
"up_axis": np.array([1.,0.,0.]),
},
6: {"center_param": np.array([0.,0.,-1.]),
"face_down_default_quat": np.array([0,0,0,1]),
"adjacent_faces": [1,2,3,5],
"opposite_face": 4,
"up_axis": np.array([0.,0.,1.]),
},
}
CUBOID_SHORT_FACES = [1,2]
CUBOID_LONG_FACES = [3,4,5,6]
"""
Compute wrench that needs to be applied to object to maintain it on desired trajectory
"""
def track_obj_traj_controller(x_des, dx_des, x_cur, dx_cur, Kp, Kv):
#print(x_des)
#print(x_cur.position, x_cur.orientation)
#print(dx_des)
#print(dx_cur)
g = np.array([0, 0, -9.81, 0, 0, 0]) # Gravity vector
# Force (compute position error)
p_delta = (x_des[0:3] - x_cur.position)
dp_delta = (dx_des[0:3] - dx_cur[0:3])
# Moment (compute orientation error)
# Compute difference between desired and current quaternion
R_des = Rotation.from_quat(x_des[3:])
R_cur = Rotation.from_quat(x_cur.orientation)
o_delta = np.zeros(3)
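# Orientation error: sum the cross products of matching columns of the current and
# desired rotation matrices; this approximates the axis-angle rotation needed to align them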
for i in range(3):
o_delta += -0.5 * np.cross(R_cur.as_matrix()[:,i], R_des.as_matrix()[:,i])
do_delta = (dx_des[3:] - dx_cur[3:]) # is this the angular velocity?
#print("p_delta: {}".format(p_delta))
#print("dp_delta: {}".format(dp_delta))
#print("o_delta: {}".format(o_delta))
#print("do_delta: {}".format(do_delta))
# Compute wrench W (6x1) with PD feedback law
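# W = Kp @ x_delta + Kv @ dx_delta - m * g  (PD feedback plus gravity compensation)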
x_delta = np.concatenate((p_delta, -1*o_delta))
dx_delta = np.concatenate((dp_delta, do_delta))
W = Kp @ x_delta + Kv @ dx_delta - OBJ_MASS * g
print("x_delta: {}".format(x_delta))
print("dx_delta: {}".format(dx_delta))
#print(W)
return W
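# Example usage (sketch; the gain matrices below are illustrative assumptions):
#   Kp = np.diag([200., 200., 200., 1., 1., 1.])
#   Kv = np.diag([20., 20., 20., 0.1, 0.1, 0.1])
#   W = track_obj_traj_controller(x_des, dx_des, x_cur, dx_cur, Kp, Kv)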
"""
Compute fingertip forces necessary to keep object on desired trajectory
"""
def get_ft_forces(x_des, dx_des, x_cur, dx_cur, Kp, Kv, cp_params):
# Get desired wrench for object COM to track obj traj
W = track_obj_traj_controller(x_des, dx_des, x_cur, dx_cur, Kp, Kv)
# Get list of contact point positions and orientations in object frame
# By converting cp_params to contactPoints
cp_list = []
for cp_param in cp_params:
if cp_param is not None:
cp = get_cp_of_from_cp_param(cp_param)
cp_list.append(cp)
fnum = len(cp_list)
# To compute grasp matrix
# The cp_list argument below is an assumption; the original call is truncated in the source.
G = __get_grasp_matrix(np.concatenate((x_cur.position, x_cur.orientation)), cp_list)
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
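# --- 'setosa&2&*' block begins here. Throughout this table the key format
# appears to be '<class name>&<label/output index>&<case index>', and each
# value is a 2-element np.array, presumably an (x, y) vector consumed by the
# plotting utilities above. Many keys intentionally map to identical vectors:
# the table looks fully enumerated (one entry per case) rather than
# deduplicated, which keeps lookups trivial at the cost of repetition. ---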
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
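# --- 'versicolor&0&*' block begins here (same key convention as noted at the
# start of the 'setosa&2&*' block; case indices again run 0-314). ---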
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&193': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&194': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&195': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&196': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&197': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&198': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&199': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&200': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&201': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&202': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&203': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&204': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&205': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&206': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&207': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&208': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&209': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&210': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&211': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&212': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&213': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&214': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&215': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&216': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&217': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&218': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&219': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&220': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&221': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&222': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&223': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&224': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&225': np.array([-0.04777085826693217, -0.931704979630315]),
'versicolor&0&226': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&227': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&228': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&229': np.array([-0.46216647196120714, 0.35468591243823655]),
'versicolor&0&230': np.array([-0.016252316132452975, -0.9640854286687816]),
'versicolor&0&231': np.array([-0.3707180757031537, -0.1977196581472426]),
'versicolor&0&232': np.array([-0.1043459833293615, -0.5233314327065356]),
'versicolor&0&233': np.array([-0.049289647556763364, -0.8736084405111605]),
'versicolor&0&234': np.array([-0.34078174031874375, -0.25874482325965437]),
'versicolor&0&235': np.array([-0.050841051273783675, -0.5877587283589205]),
'versicolor&0&236': np.array([-0.0161720977425142, -0.9096817855236822]),
'versicolor&0&237': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&238': np.array([-0.44101924439572626, 0.5583264842761904]),
'versicolor&0&239': np.array([-0.5844994389588399, 0.5715208832363579]),
'versicolor&0&240': np.array([-0.11329659732608087, -0.8671819100849522]),
'versicolor&0&241': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&242': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&243': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&244': np.array([-0.6814868825686854, 0.35066801608083215]),
'versicolor&0&245': np.array([-0.040390637135858574, -0.9402832917474078]),
'versicolor&0&246': np.array([-0.6425009695928476, -0.24851992476830956]),
'versicolor&0&247': np.array([-0.5151243662384031, -0.3255567772442641]),
'versicolor&0&248': np.array([-0.16157511199607094, -0.7754323813403634]),
'versicolor&0&249': np.array([-0.6300442788906601, -0.28361140069713875]),
'versicolor&0&250': np.array([-0.4875864856121089, -0.3614122096616301]),
'versicolor&0&251': np.array([-0.08968204532514226, -0.8491191210330045]),
'versicolor&0&252': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&253': np.array([-0.5276460255602035, 0.28992233541586077]),
'versicolor&0&254': np.array([-0.6392402874163683, 0.24114611970435948]),
'versicolor&0&255': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&256': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&257': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&258': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&259': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&260': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&261': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&262': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&263': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&264': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&265': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&266': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&267': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&268': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&269': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&270': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&271': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&275': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&276': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&277': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&278': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&279': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&280': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&281': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&285': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&286': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&290': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&291': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&292': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&293': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&294': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&295': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&296': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&300': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&301': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&305': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&306': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&307': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&308': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&309': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&310': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&311': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&1&0': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&1': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&2': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&3': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&4': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&5': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&6': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&7': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&8': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&9': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&10': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&11': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&12': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&13': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&14': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&15': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&16': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&17': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&18': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&19': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&20': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&21': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&22': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&23': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&24': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&25': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&26': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&27': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&28': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&29': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&30': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&31': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&32': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&33': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&34': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&35': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&36': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&37': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&38': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&39': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&40': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&41': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&42': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&43': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&44': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&45': np.array([0.7749499208750119, 0.8147189440804429]),
'versicolor&1&46': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'versicolor&1&50': np.array([0.8040309195416899, 0.8445152504134819]),
'versicolor&1&51': np.array([0.18555813792691386, 0.6940923833143309]),
'versicolor&1&52': np.array([0.32639262064172164, 0.6296083447134281]),
'versicolor&1&53': np.array([0.6964303997553315, 0.7444536452136676]),
'versicolor&1&54': np.array([0.18216358701833335, 0.747615101407194]),
'versicolor&1&55': np.array([0.33549445287370383, 0.6526039763053625]),
'versicolor&1&56': np.array([0.7213651642695392, 0.7718874443854203]),
'versicolor&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'versicolor&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'versicolor&1&60': np.array([0.4933316375690332, 0.5272416708629276]),
'versicolor&1&61': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'versicolor&1&65': np.array([0.5041830043657418, 0.5392782673950876]),
'versicolor&1&66': np.array([0.1413116283690917, 0.7479856297394165]),
'versicolor&1&67': np.array([0.189773257421942, 0.6552150653012478]),
'versicolor&1&68': np.array([0.40694846236352233, 0.5109051764198169]),
'versicolor&1&69': np.array([0.1390424906594644, 0.7991613016301518]),
'versicolor&1&70': np.array([0.1945777487290197, 0.6743932844312892]),
'versicolor&1&71': np.array([0.415695226122737, 0.5230815102377903]),
'versicolor&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'versicolor&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'versicolor&1&75': np.array([0.0, 0.4756207622944677]),
'versicolor&1&76': np.array([0.0, 0.4854334805210761]),
'versicolor&1&77': np.array([0.0, 0.16885577975809635]),
'versicolor&1&78': np.array([0.0, 0.395805885538554]),
'versicolor&1&79': np.array([0.0, 0.2538072707138344]),
'versicolor&1&80': np.array([0.0, 0.4854334805210761]),
'versicolor&1&81': np.array([0.0, 0.7613919530844643]),
'versicolor&1&82': np.array([0.0, 0.6668230985485095]),
'versicolor&1&83': np.array([0.0, 0.4904755652105692]),
'versicolor&1&84': np.array([0.0, 0.8121046082359693]),
'versicolor&1&85': np.array([0.0, 0.6855766903749089]),
'versicolor&1&86': np.array([0.0, 0.5008471974438506]),
'versicolor&1&87': np.array([0.0, 0.16885577975809635]),
'versicolor&1&88': np.array([0.0, 0.16885577975809635]),
'versicolor&1&89': np.array([0.0, 0.395805885538554]),
'versicolor&1&90': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&91': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&92': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&93': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&94': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&95': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&96': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&97': np.array([0.08302493125394889, 0.6186280682763334]),
'versicolor&1&98': np.array([0.22125635302655813, 0.2925832702358638]),
'versicolor&1&99': np.array([0.2365788606456636, 0.7120007179768731]),
'versicolor&1&100': np.array([0.022347126801293967, 0.6718013300441928]),
'versicolor&1&101': np.array([0.10063786451829529, 0.4085974066833644]),
'versicolor&1&102': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&103': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&104': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&105': np.array([-0.32199975656257646, 0.7482293552463756]),
'versicolor&1&106': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&107': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&108': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&109': np.array([0.2619265016777598, 0.33491141590339474]),
'versicolor&1&110': np.array([-0.43843349141088417, 0.8642740701867917]),
'versicolor&1&111': np.array([0.20183015430619713, 0.7445346002055082]),
'versicolor&1&112': np.array([-0.05987874887638573, 0.6927937290176818]),
'versicolor&1&113': np.array([-0.2562642052727569, 0.6920266972283227]),
'versicolor&1&114': np.array([0.1736438124560164, 0.7898174616442941]),
'versicolor&1&115': np.array([-0.10114089899940126, 0.7326610366533243]),
'versicolor&1&116': np.array([-0.34479806250338163, 0.7789143553916729]),
'versicolor&1&117': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&118': np.array([0.7141739659554727, 0.6619819140152878]),
'versicolor&1&119': np.array([0.44460014335081516, 0.6107546840046902]),
'versicolor&1&120': np.array([0.8224435822504677, 0.05315271528828394]),
'versicolor&1&121': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&122': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&123': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&124': np.array([0.8476206690613984, 0.02146454924522743]),
'versicolor&1&125': np.array([0.820222886307464, 0.055413714884152906]),
'versicolor&1&126': np.array([0.69362517791403, 0.2579390890424607]),
'versicolor&1&127': np.array([0.7261791877801502, 0.16248655642013624]),
'versicolor&1&128': np.array([0.8190416077589757, 0.05661509439536992]),
'versicolor&1&129': np.array([0.6654762076749751, 0.2949291633432878]),
'versicolor&1&130': np.array([0.7118161070185614, 0.17683644094125878]),
'versicolor&1&131': np.array([0.8165214253946836, 0.059175619390630096]),
'versicolor&1&132': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&133': np.array([0.8393089066702096, 0.0788980157959197]),
'versicolor&1&134': np.array([0.8282924295054531, 0.0752641855714259]),
'versicolor&1&135': np.array([0.5188109114552927, 0.03638964581864269]),
'versicolor&1&136': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&137': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&138': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&139': np.array([0.5436097000280874, 0.1461891067488832]),
'versicolor&1&140': np.array([0.5131478569192371, 0.04203387599862816]),
'versicolor&1&141': np.array([0.32513442685780247, 0.6124765483184536]),
'versicolor&1&142': np.array([0.1812883360919208, 0.5504982486874137]),
'versicolor&1&143': np.array([0.4788153032824012, 0.08625929936974323]),
'versicolor&1&144': np.array([0.28490718210609345, 0.6650298146522879]),
'versicolor&1&145': np.array([0.1313204067730033, 0.597079642504441]),
'versicolor&1&146': np.array([0.46583127837967303, 0.09875847161509169]),
'versicolor&1&147': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&148': np.array([0.73294627367007, 0.4610490766898855]),
'versicolor&1&149': np.array([0.5965042032375719, 0.48856644624972617]),
'versicolor&1&150': np.array([0.37157553889555184, 0.1221600832023858]),
'versicolor&1&151': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&152': np.array([0.9105775730167809, 0.6842162738602727]),
'versicolor&1&153': np.array([0.6718337295341267, 0.6620422637360075]),
'versicolor&1&154': np.array([0.4964962439921071, 0.3798215458387346]),
'versicolor&1&155': np.array([0.2463036871609408, 0.24630368716093934]),
'versicolor&1&156': np.array([0.2805345936193346, 0.6595182922149835]),
'versicolor&1&157': | np.array([0.08302493125394889, 0.6186280682763334]) | numpy.array |
import os, sys
import pickle
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import scipy.spatial.distance as dist
def estimate_top_view_sigmas(pkl_file):
''' call this on 'AMT_data/top/AMT15_csv.pkl'
we estimate keypoint accuracy in terms of the Object Keypoint Similarity (OKS) after Ronchi and Perona 2017.
The OKS between a detection theta-hat(p) and the annotation theta(p) of a person p is the average over the
labeled parts in the ground-truth (v_i) of the Keypoint Similarity between corresponding keypoint pairs.
From equation 1 in that paper, the Keypoint Similarity (ks) is defined as:
    ks(theta-hat(p), theta(p)) = exp( - ||theta-hat(p) - theta(p)||^2_2 / (2*s^2*k^2) )
    Where s is the object scale (the square root of the instance area in pixels) and k is a keypoint-specific standard deviation
that estimates the degree of between-annotator variability in labeling that point.
In the CoCo evaluation API, the value of k for annotations of human poses ranges from 0.025 to 0.107,
with more reliably placed body parts (such as the eyes) having the lowest values of k.
should return:
array([0.03889304, 0.04519708, 0.04505679, 0.04170943, 0.06655941,
0.0666152 , 0.04375891, 0.06663153, 0.08359647])
'''
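    # A minimal illustrative sketch of the keypoint-similarity term for a single
    # keypoint pair (hypothetical values; not used by the estimation below):
    #   d2 = (x_hat - x)**2 + (y_hat - y)**2   # squared localization error in pixels
    #   s2 = instance_area                     # s**2 is the instance area in pixels
    #   ks = np.exp(-d2 / (2 * s2 * k**2))     # k is the per-keypoint sigma estimated here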
with open(pkl_file,'rb') as fp:
D = pickle.load(fp)
area_vec = []
X = []
    Y = []
for j in range(9):
x = []
y = []
for i in range(len(D)):
x.append((D[i]['ann_B']['X'][:,j]*1024).tolist()) # scale by top-view camera dimensions
x.append((D[i]['ann_W']['X'][:,j]*1024).tolist())
y.append((D[i]['ann_B']['Y'][:,j]*570).tolist())
y.append((D[i]['ann_W']['Y'][:,j]*570).tolist())
X.append(x)
Y.append(y)
for i in range(len(D)):
area_vec.append(D[i]['ann_B']['area'])
area_vec.append(D[i]['ann_W']['area'])
X = np.asarray(X)
Y = np.asarray(Y)
area_vec = np.asarray(area_vec)
    D = np.zeros((len(X), len(X[0])))  # per-keypoint, per-instance normalized spread (reuses the name D)
sigma = np.zeros(len(X))
for j in range(len(X)):
for i in range(len(X[0])):
xy = np.asarray([X[j][i], Y[j][i]]).reshape((2,len(X[j][i]))).T
gt = np.median(xy,0)
D[j][i] = np.mean(np.std(xy,0))
D[j][i] /= np.sqrt(area_vec[i])*np.sqrt(2) # normalization to match CoCo definition of sigma
sigma[j] = | np.mean(D[j]) | numpy.mean |
"""Utility functions for two- and three-dimensional vectors."""
__all__ = [
"quaternion_mult",
"quaternion_from_angle_axis",
"angle_axis_from_quaternion",
"quaternion_conjugate",
"rotate_vector",
"thick_diagonal",
"rotation_matrix",
"rotation_about_z",
"z_to_vector",
"angle_of_vector",
"angle_between_vectors",
"project_along_vector",
"normalize",
"get_unit_normal",
"compass_directions",
"regular_vertices",
"complex_to_R3",
"R3_to_complex",
"complex_func_to_R3_func",
"center_of_mass",
"midpoint",
"find_intersection",
"line_intersection",
"get_winding_number",
"cross2d",
"earclip_triangulation",
"perpendicular_bisector",
]
import itertools as it
import math
from functools import reduce
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from mapbox_earcut import triangulate_float32 as earcut
from .. import config
from ..constants import DOWN, OUT, PI, RIGHT, TAU
from ..utils.iterables import adjacent_pairs
def norm_squared(v: np.ndarray) -> float:
return np.dot(v, v)
# Quaternions
# TODO, implement quaternion type
def quaternion_mult(
*quats: Sequence[float],
) -> Union[np.ndarray, List[Union[float, np.ndarray]]]:
"""Gets the Hamilton product of the quaternions provided.
For more information, check `this Wikipedia page
<https://en.wikipedia.org/wiki/Quaternion>`_.
Returns
-------
Union[np.ndarray, List[Union[float, np.ndarray]]]
        The Hamilton product of the given quaternions.
"""
if config.renderer == "opengl":
if len(quats) == 0:
return [1, 0, 0, 0]
result = quats[0]
for next_quat in quats[1:]:
w1, x1, y1, z1 = result
w2, x2, y2, z2 = next_quat
result = [
w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
]
return result
else:
q1 = quats[0]
q2 = quats[1]
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
return np.array(
[
w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
]
)
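# Illustrative usage (a sketch, assuming [w, x, y, z] component order): multiplying
# the identity quaternion by the unit quaternion i leaves i unchanged,
#   quaternion_mult(np.array([1, 0, 0, 0]), np.array([0, 1, 0, 0]))
#   # -> array([0., 1., 0., 0.])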
def quaternion_from_angle_axis(
angle: float, axis: np.ndarray, axis_normalized: bool = False
) -> List[float]:
"""Gets a quaternion from an angle and an axis.
For more information, check `this Wikipedia page
<https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles>`_.
Parameters
----------
angle
The angle for the quaternion.
axis
The axis for the quaternion
    axis_normalized : bool, optional
        Whether the axis is already normalized, by default False
Returns
-------
List[float]
Gives back a quaternion from the angle and axis
"""
if config.renderer == "opengl":
if not axis_normalized:
axis = normalize(axis)
return [math.cos(angle / 2), *(math.sin(angle / 2) * axis)]
else:
return np.append(np.cos(angle / 2), np.sin(angle / 2) * normalize(axis))
def angle_axis_from_quaternion(quaternion: Sequence[float]) -> Sequence[float]:
"""Gets angle and axis from a quaternion.
Parameters
----------
quaternion
The quaternion from which we get the angle and axis.
Returns
-------
Sequence[float]
Gives the angle and axis
"""
axis = normalize(quaternion[1:], fall_back=np.array([1, 0, 0]))
angle = 2 * np.arccos(quaternion[0])
if angle > TAU / 2:
angle = TAU - angle
return angle, axis
def quaternion_conjugate(quaternion: Sequence[float]) -> np.ndarray:
"""Used for finding the conjugate of the quaternion
Parameters
----------
quaternion
The quaternion for which you want to find the conjugate for.
Returns
-------
np.ndarray
The conjugate of the quaternion.
"""
result = np.array(quaternion)
result[1:] *= -1
return result
def rotate_vector(vector: np.ndarray, angle: float, axis: np.ndarray = OUT) -> np.ndarray:
"""Function for rotating a vector.
Parameters
----------
vector
The vector to be rotated.
angle
The angle to be rotated by.
    axis
        The axis to rotate about, by default OUT
Returns
-------
np.ndarray
The rotated vector with provided angle and axis.
Raises
------
ValueError
If vector is not of dimension 2 or 3.
"""
if len(vector) == 2:
# Use complex numbers...because why not
z = complex(*vector) * np.exp(complex(0, angle))
return np.array([z.real, z.imag])
elif len(vector) == 3:
# Use quaternions...because why not
quat = quaternion_from_angle_axis(angle, axis)
quat_inv = quaternion_conjugate(quat)
product = reduce(quaternion_mult, [quat, np.append(0, vector), quat_inv])
return product[1:]
else:
raise ValueError("vector must be of dimension 2 or 3")
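# Illustrative usage (a sketch; values are approximate up to floating-point error):
# a quarter turn about +Z sends +X to +Y,
#   rotate_vector(np.array([1, 0, 0]), PI / 2)  # ~ array([0., 1., 0.])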
def thick_diagonal(dim: int, thickness=2) -> np.ndarray:
row_indices = np.arange(dim).repeat(dim).reshape((dim, dim))
col_indices = np.transpose(row_indices)
return (np.abs(row_indices - col_indices) < thickness).astype("uint8")
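# Illustrative output (a sketch): thick_diagonal(4) is a 0/1 band matrix whose ones
# lie within `thickness` of the main diagonal; its first row is [1, 1, 0, 0].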
def rotation_matrix_transpose_from_quaternion(quat: np.ndarray) -> List[np.ndarray]:
"""Converts the quaternion, quat, to an equivalent rotation matrix representation.
For more information, check `this page
<https://in.mathworks.com/help/driving/ref/quaternion.rotmat.html>`_.
Parameters
----------
quat
The quaternion which is to be converted.
Returns
-------
List[np.ndarray]
Gives back the Rotation matrix representation, returned as a 3-by-3
matrix or 3-by-3-by-N multidimensional array.
"""
quat_inv = quaternion_conjugate(quat)
return [
quaternion_mult(quat, [0, *basis], quat_inv)[1:]
for basis in [
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
]
]
def rotation_matrix_from_quaternion(quat: np.ndarray) -> np.ndarray:
return np.transpose(rotation_matrix_transpose_from_quaternion(quat))
def rotation_matrix_transpose(angle: float, axis: np.ndarray) -> np.ndarray:
if axis[0] == 0 and axis[1] == 0:
# axis = [0, 0, z] case is common enough it's worth
# having a shortcut
sgn = 1 if axis[2] > 0 else -1
cos_a = math.cos(angle)
sin_a = math.sin(angle) * sgn
return [
[cos_a, sin_a, 0],
[-sin_a, cos_a, 0],
[0, 0, 1],
]
quat = quaternion_from_angle_axis(angle, axis)
return rotation_matrix_transpose_from_quaternion(quat)
def rotation_matrix(
angle: float, axis: np.ndarray, homogeneous: bool = False
) -> np.ndarray:
"""
Rotation in R^3 about a specified axis of rotation.
"""
about_z = rotation_about_z(angle)
z_to_axis = z_to_vector(axis)
axis_to_z = np.linalg.inv(z_to_axis)
inhomogeneous_rotation_matrix = reduce(np.dot, [z_to_axis, about_z, axis_to_z])
if not homogeneous:
return inhomogeneous_rotation_matrix
else:
rotation_matrix = np.eye(4)
rotation_matrix[:3, :3] = inhomogeneous_rotation_matrix
return rotation_matrix
def rotation_about_z(angle: float) -> List[List[float]]:
"""Returns a rotation matrix for a given angle.
Parameters
----------
angle : float
Angle for the rotation matrix.
Returns
-------
    List[List[float]]
        Gives back the rotation matrix as nested lists.
"""
return [
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
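# Illustrative usage (a sketch): np.dot(rotation_about_z(PI / 2), np.array([1, 0, 0]))
# is approximately [0, 1, 0], i.e. a counterclockwise quarter turn of +X onto +Y.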
def z_to_vector(vector: np.ndarray) -> np.ndarray:
"""
Returns some matrix in SO(3) which takes the z-axis to the
(normalized) vector provided as an argument
"""
norm = np.linalg.norm(vector)
if norm == 0:
return np.identity(3)
v = np.array(vector) / norm
phi = np.arccos(v[2])
if any(v[:2]):
# projection of vector to unit circle
axis_proj = v[:2] / np.linalg.norm(v[:2])
theta = np.arccos(axis_proj[0])
if axis_proj[1] < 0:
theta = -theta
else:
theta = 0
phi_down = np.array(
[[np.cos(phi), 0, np.sin(phi)], [0, 1, 0], [-np.sin(phi), 0, np.cos(phi)]]
)
return np.dot(rotation_about_z(theta), phi_down)
def angle_of_vector(vector: Sequence[float]) -> float:
"""Returns polar coordinate theta when vector is projected on xy plane.
Parameters
----------
vector
The vector to find the angle for.
Returns
-------
float
The angle of the vector projected.
"""
if config.renderer == "opengl":
return np.angle(complex(*vector[:2]))
else:
z = complex(*vector[:2])
if z == 0:
return 0
return np.angle(complex(*vector[:2]))
def angle_between_vectors(v1: np.ndarray, v2: np.ndarray) -> np.ndarray:
"""Returns the angle between two vectors.
This angle will always be between 0 and pi
Parameters
----------
v1
The first vector.
v2
The second vector.
Returns
-------
np.ndarray
The angle between the vectors.
"""
return 2 * np.arctan2(
np.linalg.norm(normalize(v1) - normalize(v2)),
np.linalg.norm(normalize(v1) + normalize(v2)),
)
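# Illustrative usage (a sketch): perpendicular unit vectors give PI / 2,
#   angle_between_vectors(np.array([1, 0, 0]), np.array([0, 1, 0]))  # ~ 1.5708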
def project_along_vector(point: np.ndarray, vector: np.ndarray) -> np.ndarray:
    """Projects a point along a vector, i.e. removes the component of the point
    lying along the (unit) vector.
    Parameters
    ----------
    point
        The point to be projected.
    vector
        The (unit) vector along which the point's component is removed.
    Returns
    -------
    np.ndarray
        The projected point.
    """
matrix = np.identity(3) - np.outer(vector, vector)
return np.dot(point, matrix.T)
def normalize(vect: Union[np.ndarray, Tuple[float]], fall_back=None) -> np.ndarray:
norm = np.linalg.norm(vect)
if norm > 0:
return np.array(vect) / norm
else:
if fall_back is not None:
return fall_back
else:
return np.zeros(len(vect))
def normalize_along_axis(array: np.ndarray, axis: np.ndarray) -> np.ndarray:
"""Normalizes an array with the provided axis.
Parameters
----------
array
The array which has to be normalized.
axis
The axis to be normalized to.
Returns
-------
np.ndarray
Array which has been normalized according to the axis.
"""
norms = np.sqrt((array * array).sum(axis))
norms[norms == 0] = 1
buffed_norms = np.repeat(norms, array.shape[axis]).reshape(array.shape)
array /= buffed_norms
return array
def get_unit_normal(v1: np.ndarray, v2: np.ndarray, tol: float = 1e-6) -> np.ndarray:
"""Gets the unit normal of the vectors.
Parameters
----------
v1
The first vector.
v2
The second vector
tol
        Tolerance below which the cross product is treated as zero, by default 1e-6
Returns
-------
np.ndarray
The normal of the two vectors.
"""
if config.renderer == "opengl":
v1 = normalize(v1)
v2 = normalize(v2)
cp = np.cross(v1, v2)
cp_norm = np.linalg.norm(cp)
if cp_norm < tol:
# Vectors align, so find a normal to them in the plane shared with the z-axis
new_cp = np.cross(np.cross(v1, OUT), v1)
new_cp_norm = np.linalg.norm(new_cp)
if new_cp_norm < tol:
return DOWN
return new_cp / new_cp_norm
return cp / cp_norm
else:
return normalize(np.cross(v1, v2))
###
def compass_directions(n: int = 4, start_vect: np.ndarray = RIGHT) -> np.ndarray:
"""Finds the cardinal directions using tau.
Parameters
----------
n
The amount to be rotated, by default 4
start_vect
The direction for the angle to start with, by default RIGHT
Returns
-------
np.ndarray
The angle which has been rotated.
"""
angle = TAU / n
return np.array([rotate_vector(start_vect, k * angle) for k in range(n)])
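# Illustrative usage (a sketch): compass_directions(4) returns unit vectors toward
# +X, +Y, -X and -Y, i.e. counterclockwise quarter turns of RIGHT.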
def regular_vertices(
n: int, *, radius: float = 1, start_angle: Optional[float] = None
) -> Tuple[np.ndarray, float]:
"""Generates regularly spaced vertices around a circle centered at the origin.
Parameters
----------
n
The number of vertices
radius
The radius of the circle that the vertices are placed on.
start_angle
The angle the vertices start at.
If unspecified, for even ``n`` values, ``0`` will be used.
For odd ``n`` values, 90 degrees is used.
Returns
-------
vertices : :class:`numpy.ndarray`
The regularly spaced vertices.
start_angle : :class:`float`
The angle the vertices start at.
"""
if start_angle is None:
if n % 2 == 0:
start_angle = 0
else:
start_angle = TAU / 4
start_vector = rotate_vector(RIGHT * radius, start_angle)
vertices = compass_directions(n, start_vector)
return vertices, start_angle
def complex_to_R3(complex_num: complex) -> np.ndarray:
return np.array((complex_num.real, complex_num.imag, 0))
def R3_to_complex(point: Sequence[float]) -> complex:
return complex(*point[:2])
def complex_func_to_R3_func(complex_func):
return lambda p: complex_to_R3(complex_func(R3_to_complex(p)))
def center_of_mass(points: Sequence[float]) -> np.ndarray:
"""Gets the center of mass of the points in space.
Parameters
----------
points
The points to find the center of mass from.
Returns
-------
np.ndarray
The center of mass of the points.
"""
points = [np.array(point).astype("float") for point in points]
return sum(points) / len(points)
def midpoint(
point1: Sequence[float], point2: Sequence[float]
) -> Union[float, np.ndarray]:
"""Gets the midpoint of two points.
Parameters
----------
point1
The first point.
point2
The second point.
Returns
-------
Union[float, np.ndarray]
The midpoint of the points
"""
return center_of_mass([point1, point2])
def line_intersection(line1: Sequence[float], line2: Sequence[float]) -> np.ndarray:
"""Returns intersection point of two lines, each defined with
a pair of vectors determining the end points.
Parameters
----------
line1
The first line.
line2
The second line.
Returns
-------
np.ndarray
The intersection points of the two lines which are intersecting.
Raises
------
ValueError
Error is produced if the two lines don't intersect with each other
"""
x_diff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
y_diff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(x_diff, y_diff)
if div == 0:
raise ValueError("Lines do not intersect")
d = (det(*line1), det(*line2))
x = det(d, x_diff) / div
y = det(d, y_diff) / div
return np.array([x, y, 0])
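# Illustrative usage (a sketch with 2D endpoints; the z coordinate of the result is 0):
#   line_intersection([(0, 0), (2, 2)], [(0, 2), (2, 0)])  # -> array([1., 1., 0.])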
def find_intersection(p0, v0, p1, v1, threshold=1e-5) -> np.ndarray:
"""
Return the intersection of a line passing through p0 in direction v0
with one passing through p1 in direction v1. (Or array of intersections
from arrays of such points/directions).
For 3d values, it returns the point on the ray p0 + v0 * t closest to the
ray p1 + v1 * t
"""
p0 = np.array(p0, ndmin=2)
v0 = np.array(v0, ndmin=2)
p1 = np.array(p1, ndmin=2)
v1 = np.array(v1, ndmin=2)
m, n = np.shape(p0)
assert n in [2, 3]
numerator = np.cross(v1, p1 - p0)
denominator = np.cross(v1, v0)
if n == 3:
d = len(np.shape(numerator))
new_numerator = np.multiply(numerator, numerator).sum(d - 1)
new_denominator = np.multiply(denominator, numerator).sum(d - 1)
numerator, denominator = new_numerator, new_denominator
denominator[abs(denominator) < threshold] = np.inf # So that ratio goes to 0 there
ratio = numerator / denominator
ratio = | np.repeat(ratio, n) | numpy.repeat |
from pydpm._sampler import Basic_Sampler
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
from collections import Counter
def debug_sampler_and_plot():
sampler = Basic_Sampler('gpu')
# gamma
output = sampler.gamma(np.ones(1000)*4.5, 5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 100, 100), stats.gamma.pdf(np.linspace(0, 100, 100), 4.5, scale=5))
plt.title('gamma(4.5, 5)')
plt.show()
# standard_gamma
output = sampler.standard_gamma(np.ones(1000)*4.5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 20, 100), stats.gamma.pdf(np.linspace(0, 20, 100), 4.5))
plt.title('standard_gamma(4.5)')
plt.show()
# dirichlet
output = sampler.dirichlet(np.ones(1000)*4.5)
plt.figure()
plt.hist(output, bins=20, density=True)
# x = np.linspace(np.min(output), np.max(output), 100)
# plt.plot(x, stats.dirichlet.pdf(x, alpha=np.ones(100)*4.5))
plt.title('dirichlet(4.5)')
plt.show()
# beta
output = sampler.beta(np.ones(1000)*0.5, 0.5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 1, 100), stats.beta.pdf(np.linspace(0, 1, 100), 0.5, 0.5))
plt.title('beta(0.5, 0.5)')
plt.show()
# beta(2, 5)
output = sampler.beta(np.ones(1000)*2, 5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 1, 100), stats.beta.pdf(np.linspace(0, 1, 100), 2, 5))
plt.title('beta(2, 5)')
plt.show()
# normal
output = sampler.normal(np.ones(1000)*5, np.ones(1000)*2)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-2, 13, 100), stats.norm.pdf(np.linspace(-2, 13, 100), 5, scale=2))
plt.title('normal(5, 2)')
plt.show()
# standard_normal
output = sampler.standard_normal(1000)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-3, 3, 100), stats.norm.pdf(np.linspace(-3, 3, 100)))
plt.title('standard_normal()')
plt.show()
# uniform
output = sampler.uniform(np.ones(1000)*(-2), np.ones(1000)*5)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-3, 6, 100), stats.uniform.pdf(np.linspace(-3, 6, 100), -2, 7))
plt.title('uniform(-2, 5)')
plt.show()
# standard_uniform
output = sampler.standard_uniform(1000)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-0.3, 1.3, 100), stats.uniform.pdf(np.linspace(-0.3, 1.3, 100)))
plt.title('standard_uniform()')
plt.show()
# binomial
output = sampler.binomial(np.ones(1000)*10, np.ones(1000)*0.5)
plt.figure()
    plt.hist(output, bins=np.max(output)-np.min(output)+1, density=True, range=(np.min(output)-0.5, np.max(output)+0.5))
# plt.scatter(np.arange(10), stats.binom._pmf(np.arange(10), 10, 0.5), c='orange', zorder=10)
plt.title('binomial(10, 0.5)')
plt.show()
# negative_binomial
output = sampler.negative_binomial(np.ones(1000)*10, 0.5)
plt.figure()
    plt.hist(output, bins=np.max(output)-np.min(output)+1, density=True, range=(np.min(output)-0.5, np.max(output)+0.5))
    plt.scatter(np.arange(30), stats.nbinom.pmf(np.arange(30), 10, 0.5), c='orange', zorder=10)
plt.title('negative_binomial(10, 0.5)')
plt.show()
# multinomial
output = sampler.multinomial(5, [0.8, 0.2], 1000)
# output = sampler.multinomial([10]*4, [[0.8, 0.2]]*4, 3)
plt.figure()
plt.hist(output[0], bins=10, density=True)
plt.title('multinomial(5, [0.8, 0.2])')
plt.show()
a = np.array([np.array([[i] * 6 for i in range(6)]).reshape(-1), np.array(list(range(6)) * 6)]).T
output = stats.multinomial(n=5, p=[0.8, 0.2]).pmf(a)
sns.heatmap(output.reshape(6, 6), annot=True)
    plt.ylabel('count of the 1st kind (p=0.8)')
    plt.xlabel('count of the 2nd kind (p=0.2)')
plt.title('stats.multinomial(n=5, p=[0.8, 0.2])')
plt.show()
# poisson
output = sampler.poisson(np.ones(1000)*10)
plt.figure()
plt.hist(output, bins=22, density=True, range=(-0.5, 21.5))
plt.scatter(np.arange(20), stats.poisson.pmf(np.arange(20), 10), c='orange', zorder=10)
plt.title('poisson(10)')
plt.show()
# cauchy
output = sampler.cauchy(np.ones(1000)*1, 0.5)
plt.figure()
plt.hist(output, bins=20, density=True, range=(-5, 7))
plt.plot(np.linspace(-5, 7, 100), stats.cauchy.pdf(np.linspace(-5, 7, 100), 1, 0.5))
plt.title('cauchy(1, 0.5)')
plt.show()
# standard_cauchy
output = sampler.standard_cauchy(1000)
plt.figure()
plt.hist(output, bins=20, density=True, range=(-7, 7))
plt.plot(np.linspace(-7, 7, 100), stats.cauchy.pdf(np.linspace(-7, 7, 100)))
plt.title('standard_cauchy()')
plt.show()
# chisquare
output = sampler.chisquare(np.ones(1000)*10)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 30, 100), stats.chi2.pdf(np.linspace(0, 30, 100), 10))
plt.title('chisquare(10)')
plt.show()
# noncentral_chisquare
output = sampler.noncentral_chisquare(np.ones(1000)*10, 5)
plt.figure()
plt.hist(output, bins=20, density=True)
    # noncentral_chi2 = scale^2 * (chi2 + 2*loc*chi + df*loc^2)
# E(Z) = nonc + df
# Var(Z) = 2(df+2nonc)
plt.title('noncentral_chisquare(df=10, nonc=5)')
plt.show()
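    # Sanity check (illustrative): the sample moments should approximate the
    # theoretical ones, E[Z] = df + nonc = 15 and Var[Z] = 2*(df + 2*nonc) = 40:
    # print(np.mean(output), np.var(output))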
# exponential
lam = 0.5
output = sampler.exponential(np.ones(1000)*lam)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0.01, 4, 100), stats.expon.pdf(np.linspace(0.01, 4, 100), scale=0.5))
plt.title('exponential(0.5)')
plt.show()
# standard_exponential
output = sampler.standard_exponential(1000)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0.01, 8, 100), stats.expon.pdf(np.linspace(0.01, 8, 100)))
plt.title('standard_exponential()')
plt.show()
# f
output = sampler.f(np.ones(1000)*10, 10)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 8, 100), stats.f.pdf(np.linspace(0, 8, 100), 10, 10))
plt.title('f(10, 10)')
plt.show()
# noncentral_f
output = sampler.noncentral_f(np.ones(1000)*10, 10, 5)
plt.figure()
plt.hist(output, bins=20, density=True)
# E(F) = (m+nonc)*n / (m*(n-2)), n>2.
# Var(F) = 2*(n/m)**2 * ((m+nonc)**2 + (m+2*nonc)*(n-2)) / ((n-2)**2 * (n-4))
plt.title('noncentral_f(dfnum=10, dfden=10, nonc=5)')
plt.show()
# geometric
output = sampler.geometric(np.ones(1000)*0.1)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.scatter(np.arange(50), stats.geom.pmf(np.arange(50), p=0.1), c='orange', zorder=10)
plt.title('geometric(0.1)')
plt.show()
# gumbel
output = sampler.gumbel(np.ones(1000)*5, np.ones(1000)*2)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(0, 20, 100), stats.gumbel_r.pdf(np.linspace(0, 20, 100)+0.01, 5, scale=2))
plt.title('gumbel(5, 2)')
plt.show()
# hypergeometric
output = sampler.hypergeometric(np.ones(1000)*5, 10, 10)
plt.figure()
    plt.hist(output, bins=np.max(output)-np.min(output)+1, density=True, range=(np.min(output)-0.5, np.max(output)+0.5))
    plt.scatter(np.arange(10), stats.hypergeom(15, 5, 10).pmf(np.arange(10)), c='orange', zorder=10)  # hypergeom(M, n, N): M = population size, n = successes in population, N = draws
plt.title('hypergeometric(5, 10, 10)')
plt.show()
# laplace
output = sampler.laplace(np.ones(1000)*5, np.ones(1000)*2)
plt.figure()
plt.hist(output, bins=20, density=True)
plt.plot(np.linspace(-10, 20, 100), stats.laplace.pdf(np.linspace(-10, 20, 100), 5, scale=2))
plt.title('laplace(5, 2)')
plt.show()
# logistic
output = sampler.logistic( | np.ones(1000) | numpy.ones |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
import os
import git
import sys
repo = git.Repo(".", search_parent_directories=True)
# print(repo.working_tree_dir)
if f"{repo.working_tree_dir}" not in sys.path:
sys.path.append(f"{repo.working_tree_dir}")
print("add")
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List
from collections import defaultdict, namedtuple
import logging
import time
from datetime import datetime
import pickle
import os
import librosa
import scipy
from scipy.io import wavfile
from scipy.signal import fftconvolve
import numpy as np
import networkx as nx
import habitat_sim
from habitat_sim.utils.common import quat_from_angle_axis, quat_from_coeffs, quat_to_angle_axis
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.core.simulator import Config, AgentState, ShortestPathPoint
from soundspaces.utils import load_metadata, _to_tensor
#from habitat.core.registry import registry
# from ss_baselines.common.registry import registry
from contextlib import redirect_stdout
class DummySimulator:
"""
Dummy simulator for avoiding loading the scene meshes when using cached observations.
"""
def __init__(self):
self.position = None
self.rotation = None
self._sim_obs = None
def seed(self, seed):
pass
def set_agent_state(self, position, rotation):
self.position = np.array(position, dtype=np.float32)
self.rotation = rotation
def get_agent_state(self):
class State:
def __init__(self, position, rotation):
self.position = position
self.rotation = rotation
return State(self.position, self.rotation)
def set_sensor_observations(self, sim_obs):
self._sim_obs = sim_obs
def get_sensor_observations(self):
return self._sim_obs
def close(self):
pass
from habitat.core.registry import registry
@registry.register_simulator(name='SoundSpaces')
class SoundSpaces(HabitatSim):
r"""Changes made to simulator wrapper over habitat-sim
This simulator first loads the graph of current environment and moves the agent among nodes.
Any sounds can be specified in the episode and loaded in this simulator.
Args:
config: configuration for initializing the simulator.
"""
def action_space_shortest_path(self, source: AgentState, targets: List[AgentState], agent_id: int = 0) -> List[
ShortestPathPoint]:
pass
def __init__(self, config: Config) -> None:
print("2:sim enter--->SoundSpaces")
super().__init__(config)
self._source_position_index = None
self._receiver_position_index = None
self._rotation_angle = None
self._current_sound = None
self._source_sound_dict = dict()
self._sampling_rate = None
self._node2index = None
self._frame_cache = dict()
self._audiogoal_cache = dict()
self._spectrogram_cache = dict()
self._scene_observations = None
self._episode_step_count = None
self._is_episode_active = None
self._position_to_index_mapping = dict()
self._previous_step_collided = False
self.points, self.graph = load_metadata(self.metadata_dir)
for node in self.graph.nodes():
self._position_to_index_mapping[self.position_encoding(self.graph.nodes()[node]['point'])] = node
self._load_sound_sources()
logging.info('Current scene: {} and sound: {}'.format(self.current_scene_name, self._current_sound))
if self.config.USE_RENDERED_OBSERVATIONS:
self._sim.close()
del self._sim
self._sim = DummySimulator()
with open(self.current_scene_observation_file, 'rb') as fo:
self._frame_cache = pickle.load(fo)
self._cnt = 0
def get_agent_state(self, agent_id: int = 0) -> habitat_sim.AgentState:
if not self.config.USE_RENDERED_OBSERVATIONS:
agent_state = super().get_agent_state(agent_id)
else:
agent_state = self._sim.get_agent_state()
return agent_state
def set_agent_state(
self,
position: List[float],
rotation: List[float],
agent_id: int = 0,
reset_sensors: bool = True,
) -> bool:
if not self.config.USE_RENDERED_OBSERVATIONS:
super().set_agent_state(position, rotation, agent_id=agent_id, reset_sensors=reset_sensors)
else:
pass
@property
def binaural_rir_dir(self):
return os.path.join(self.config.AUDIO.BINAURAL_RIR_DIR, self.config.SCENE_DATASET, self.current_scene_name)
@property
def source_sound_dir(self):
return self.config.AUDIO.SOURCE_SOUND_DIR
@property
def metadata_dir(self):
return os.path.join(self.config.AUDIO.METADATA_DIR, self.config.SCENE_DATASET, self.current_scene_name)
@property
def current_scene_name(self):
# config.SCENE (_current_scene) looks like 'data/scene_datasets/replica/office_1/habitat/mesh_semantic.ply'
return self._current_scene.split('/')[3]
@property
def current_scene_observation_file(self):
return os.path.join(self.config.SCENE_OBSERVATION_DIR, self.config.SCENE_DATASET,
self.current_scene_name + '.pkl')
@property
def current_source_sound(self):
return self._source_sound_dict[self._current_sound]
def reconfigure(self, config: Config):#-> None:
#
# now = datetime.now()
# now_str = now.strftime('%Y-%m-%d-%H-%M-%S')
# self._agent_file_name = f"data/models/replica/av_nav/e0010-av_nav-ds/audiogoal_depth/{now_str}.txt"
#
self.config = config
is_same_sound = config.AGENT_0.SOUND == self._current_sound
if not is_same_sound:
self._current_sound = self.config.AGENT_0.SOUND
is_same_scene = config.SCENE == self._current_scene
if not is_same_scene:
self._current_scene = config.SCENE
logging.debug('Current scene: {} and sound: {}'.format(self.current_scene_name, self._current_sound))
if not self.config.USE_RENDERED_OBSERVATIONS:
self._sim.close()
del self._sim
self.sim_config = self.create_sim_config(self._sensor_suite)
self._sim = habitat_sim.Simulator(self.sim_config)
self._update_agents_state()
self._frame_cache = dict()
else:
with open(self.current_scene_observation_file, 'rb') as fo:
self._frame_cache = pickle.load(fo)
logging.debug('Loaded scene {}'.format(self.current_scene_name))
self.points, self.graph = load_metadata(self.metadata_dir)
for node in self.graph.nodes():
self._position_to_index_mapping[self.position_encoding(self.graph.nodes()[node]['point'])] = node
if not is_same_scene or not is_same_sound:
self._audiogoal_cache = dict()
self._spectrogram_cache = dict()
self._episode_step_count = 0
# set agent positions
self._receiver_position_index = self._position_to_index(self.config.AGENT_0.START_POSITION)
self._source_position_index = self._position_to_index(self.config.AGENT_0.GOAL_POSITION)
# the agent rotates about +Y starting from -Z counterclockwise,
# so rotation angle 90 means the agent rotate about +Y 90 degrees
self._rotation_angle = int(np.around(np.rad2deg(quat_to_angle_axis(quat_from_coeffs(
self.config.AGENT_0.START_ROTATION))[0]))) % 360
if not self.config.USE_RENDERED_OBSERVATIONS:
self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
self.config.AGENT_0.START_ROTATION)
else:
self._sim.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
quat_from_coeffs(self.config.AGENT_0.START_ROTATION))
logging.debug("Initial source, agent at: {}, {}, orientation: {}".
format(self._source_position_index, self._receiver_position_index, self.get_orientation()))
#add
# self._cnt = 0
# with open(self._agent_file_name,'w') as f:
# f.write(f'goal :{self._source_position_index}\n')
# f.write(f'start:{self._receiver_position_index}\n')
# f.write('#STOP: 0, FORWARD: 1, LEFT: 2, RIGHT: 3\n')
# f.close()
@staticmethod
def position_encoding(position):
# print(position)
if isinstance(position,list):
if isinstance(position[0],list):#[[-1.109645, -1.5434817, 0.683166]]
position = tuple(position[0])
else: #[0.3903549909591675, -1.543481707572937, 2.183166027069092]
position = tuple(position)
# print(position)
return '{:.2f}_{:.2f}_{:.2f}'.format(*position)
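    # Illustrative mapping (a sketch): coordinates are rounded to two decimals and
    # joined with underscores, e.g. [0.39, -1.54, 2.18] -> '0.39_-1.54_2.18'.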
def _position_to_index(self, position):
if self.position_encoding(position) in self._position_to_index_mapping:
return self._position_to_index_mapping[self.position_encoding(position)]
else:
raise ValueError("Position misalignment.")
def _get_sim_observation(self):
joint_index = (self._receiver_position_index, self._rotation_angle)
if joint_index in self._frame_cache:
return self._frame_cache[joint_index]
else:
assert not self.config.USE_RENDERED_OBSERVATIONS
sim_obs = self._sim.get_sensor_observations()
            # sensor observations are cached as-is; the original per-sensor copy loop was a no-op
self._frame_cache[joint_index] = sim_obs
return sim_obs
def reset(self):
logging.debug('Reset simulation')
if not self.config.USE_RENDERED_OBSERVATIONS:
sim_obs = self._sim.reset()
if self._update_agents_state():
sim_obs = self._get_sim_observation()
else:
sim_obs = self._get_sim_observation()
self._sim.set_sensor_observations(sim_obs)
self._is_episode_active = True
self._prev_sim_obs = sim_obs
self._previous_step_collided = False
# Encapsule data under Observations class
observations = self._sensor_suite.get_observations(sim_obs)
return observations
def step(self, action, only_allowed=True):
"""
All angle calculations in this function is w.r.t habitat coordinate frame, on X-Z plane
where +Y is upward, -Z is forward and +X is rightward.
Angle 0 corresponds to +X, angle 90 corresponds to +y and 290 corresponds to 270.
:param action: action to be taken
:param only_allowed: if true, then can't step anywhere except allowed locations
:return:
Dict of observations
"""
# with open(self._agent_file_name,'a') as f:
# f.write(f'action:{action}------->_is_episode_active:{self._is_episode_active}----{self._cnt}\n')
# f.close()
#
assert self._is_episode_active, (
"episode is not active, environment not RESET or "
"STOP action called previously"
)
self._previous_step_collided = False
        # STOP: 0, FORWARD: 1, LEFT: 2, RIGHT: 3
if action == HabitatSimActions.STOP:
self._is_episode_active = False
else:
prev_position_index = self._receiver_position_index
prev_rotation_angle = self._rotation_angle
if action == HabitatSimActions.MOVE_FORWARD:
# the agent initially faces -Z by default
self._previous_step_collided = True
for neighbor in self.graph[self._receiver_position_index]:
p1 = self.graph.nodes[self._receiver_position_index]['point']
p2 = self.graph.nodes[neighbor]['point']
direction = int(np.around(np.rad2deg(np.arctan2(p2[2] - p1[2], p2[0] - p1[0])))) % 360
if direction == self.get_orientation():
self._receiver_position_index = neighbor
self._previous_step_collided = False
break
elif action == HabitatSimActions.TURN_LEFT:
# agent rotates counterclockwise, so turning left means increasing rotation angle by 90
self._rotation_angle = (self._rotation_angle + 90) % 360
elif action == HabitatSimActions.TURN_RIGHT:
self._rotation_angle = (self._rotation_angle - 90) % 360
if self.config.CONTINUOUS_VIEW_CHANGE:
intermediate_observations = list()
fps = self.config.VIEW_CHANGE_FPS
if action == HabitatSimActions.MOVE_FORWARD:
prev_position = np.array(self.graph.nodes[prev_position_index]['point'])
current_position = np.array(self.graph.nodes[self._receiver_position_index]['point'])
for i in range(1, fps):
intermediate_position = prev_position + i / fps * (current_position - prev_position)
self.set_agent_state(intermediate_position.tolist(), quat_from_angle_axis(np.deg2rad(
self._rotation_angle), np.array([0, 1, 0])))
sim_obs = self._sim.get_sensor_observations()
observations = self._sensor_suite.get_observations(sim_obs)
intermediate_observations.append(observations)
else:
for i in range(1, fps):
if action == HabitatSimActions.TURN_LEFT:
intermediate_rotation = prev_rotation_angle + i / fps * 90
elif action == HabitatSimActions.TURN_RIGHT:
intermediate_rotation = prev_rotation_angle - i / fps * 90
self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
quat_from_angle_axis(np.deg2rad(intermediate_rotation),
np.array([0, 1, 0])))
sim_obs = self._sim.get_sensor_observations()
observations = self._sensor_suite.get_observations(sim_obs)
intermediate_observations.append(observations)
if not self.config.USE_RENDERED_OBSERVATIONS:
self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
quat_from_angle_axis(np.deg2rad(self._rotation_angle), np.array([0, 1, 0])))
else:
self._sim.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
quat_from_angle_axis(np.deg2rad(self._rotation_angle), np.array([0, 1, 0])))
self._episode_step_count += 1
# log debugging info
logging.debug('After taking action {}, s,r: {}, {}, orientation: {}, location: {}'.format(
action, self._source_position_index, self._receiver_position_index,
self.get_orientation(), self.graph.nodes[self._receiver_position_index]['point']))
sim_obs = self._get_sim_observation()
if self.config.USE_RENDERED_OBSERVATIONS:
self._sim.set_sensor_observations(sim_obs)
self._prev_sim_obs = sim_obs
observations = self._sensor_suite.get_observations(sim_obs)
if self.config.CONTINUOUS_VIEW_CHANGE:
observations['intermediate'] = intermediate_observations
#__add__
# with open(self._agent_file_name,'a') as f:
# f.write(f'{action}------->{self._receiver_position_index}----{self._cnt}\n')
# f.close()
# self._cnt += 1
#
return observations
def get_orientation(self):
_base_orientation = 270
return (_base_orientation - self._rotation_angle) % 360
@property
def azimuth_angle(self):
# this is the angle used to index the binaural audio files
# in mesh coordinate systems, +Y forward, +X rightward, +Z upward
# azimuth is calculated clockwise so +Y is 0 and +X is 90
return -(self._rotation_angle + 0) % 360
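    # Illustrative mapping (a sketch): rotation angle 0 -> azimuth 0, 90 -> 270,
    # 180 -> 180 and 270 -> 90, since the negated angle is taken modulo 360.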
@property
def reaching_goal(self):
return self._source_position_index == self._receiver_position_index
def _update_observations_with_audio(self, observations):
audio = self.get_current_audio_observation()
observations.update({"audio": audio})
def _load_sound_sources(self):
# load all mono files at once
sound_files = os.listdir(self.source_sound_dir)
for sound_file in sound_files:
sound = sound_file.split('.')[0]
sr, audio_data = wavfile.read(os.path.join(self.source_sound_dir, sound_file))
assert sr == 44100
if sr != self.config.AUDIO.RIR_SAMPLING_RATE:
audio_data = scipy.signal.resample(audio_data, self.config.AUDIO.RIR_SAMPLING_RATE)
self._source_sound_dict[sound] = audio_data
def _compute_euclidean_distance_between_sr_locations(self):
p1 = self.graph.nodes[self._receiver_position_index]['point']
p2 = self.graph.nodes[self._source_position_index]['point']
d = np.sqrt((p1[0] - p2[0])**2 + (p1[2] - p2[2])**2)
return d
def _compute_audiogoal(self):
binaural_rir_file = os.path.join(self.binaural_rir_dir, str(self.azimuth_angle), '{}_{}.wav'.format(
self._receiver_position_index, self._source_position_index))
try:
sampling_freq, binaural_rir = wavfile.read(binaural_rir_file) # float32
# # pad RIR with zeros to take initial delays into account
# num_delay_sample = int(self._compute_euclidean_distance_between_sr_locations() / 343.0 * sampling_freq)
# binaural_rir = np.pad(binaural_rir, ((num_delay_sample, 0), (0, 0)))
except ValueError:
logging.warning("{} file is not readable".format(binaural_rir_file))
binaural_rir = np.zeros((self.config.AUDIO.RIR_SAMPLING_RATE, 2)).astype(np.float32)
if len(binaural_rir) == 0:
logging.debug("Empty RIR file at {}".format(binaural_rir_file))
binaural_rir = np.zeros((self.config.AUDIO.RIR_SAMPLING_RATE, 2)).astype(np.float32)
# by default, convolve in full mode, which preserves the direct sound
binaural_convolved = [fftconvolve(self.current_source_sound, binaural_rir[:, channel]
) for channel in range(binaural_rir.shape[-1])]
audiogoal = | np.array(binaural_convolved) | numpy.array |
##############################################################################
#Copyright 2019 Google LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#################################################################################
#### C O D E C A N B E RE-U S E D W I T H C I T A T I O N B E L O W ####
#################################################################################
############### F E A T U R E W I Z ###############
################ featurewiz library developed by <NAME> #################
#### THIS METHOD IS KNOWN AS SULOV METHOD in HONOR OF my mom, SULOCHANA #########
##### SULOV means Searching for Uncorrelated List Of Variables ###########
############### v 0.0.1 ################
############### A L L R I G H T S R E S E R V E D ################
#################################################################################
##### This project is not an official Google project. It is not supported by ####
##### Google and Google specifically disclaims all warranties as to its quality,#
##### merchantability, or fitness for a particular purpose. ####################
#################################################################################
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
from sklearn.multiclass import OneVsRestClassifier
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import train_test_split
################################################################################
#### The warnings from Sklearn are so annoying that I have to shut it off #######
import warnings
warnings.filterwarnings("ignore")
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
def warn(*args, **kwargs):
pass
warnings.warn = warn
####################################################################################
import re
import pdb
import pprint
from itertools import cycle, combinations
from collections import defaultdict, OrderedDict
import copy
import time
import sys
import random
import xlrd
import statsmodels
from io import BytesIO
import base64
from functools import reduce
import copy
#######################################################################################################
def classify_features(dfte, depVar, verbose=0):
dfte = copy.deepcopy(dfte)
if isinstance(depVar, list):
orig_preds = [x for x in list(dfte) if x not in depVar]
else:
orig_preds = [x for x in list(dfte) if x not in [depVar]]
################# CLASSIFY COLUMNS HERE ######################
var_df = classify_columns(dfte[orig_preds], verbose)
##### Classify Columns ################
IDcols = var_df['id_vars']
discrete_string_vars = var_df['nlp_vars']+var_df['discrete_string_vars']
cols_delete = var_df['cols_delete']
bool_vars = var_df['string_bool_vars'] + var_df['num_bool_vars']
int_vars = var_df['int_vars']
categorical_vars = var_df['cat_vars'] + var_df['factor_vars'] + int_vars + bool_vars
date_vars = var_df['date_vars']
if len(var_df['continuous_vars'])==0 and len(int_vars)>0:
continuous_vars = var_df['int_vars']
categorical_vars = left_subtract(categorical_vars, int_vars)
int_vars = []
else:
continuous_vars = var_df['continuous_vars']
preds = [x for x in orig_preds if x not in IDcols+cols_delete+discrete_string_vars]
if len(IDcols+cols_delete+discrete_string_vars) == 0:
print(' No variables removed since no ID or low-information variables found in data set')
else:
print(' %d variables removed since they were ID or low-information variables'
%len(IDcols+cols_delete+discrete_string_vars))
if verbose >= 1:
print(' List of variables removed: %s' %(IDcols+cols_delete+discrete_string_vars))
    ############# Check if there are too many columns to visualize ################
    max_cols_analyzed = 30   ### beyond this many columns, the per-column listing is skipped
    cols_list = list(dfte)
    ppt = pprint.PrettyPrinter(indent=4)
    if verbose==1 and len(cols_list) <= max_cols_analyzed:
        marthas_columns(dfte,verbose)
        print("   Columns to delete:")
        ppt.pprint('   %s' % cols_delete)
print(" Boolean variables %s ")
ppt.pprint(' %s' % bool_vars)
print(" Categorical variables %s ")
ppt.pprint(' %s' % categorical_vars)
print(" Continuous variables %s " )
ppt.pprint(' %s' % continuous_vars)
print(" Discrete string variables %s " )
ppt.pprint(' %s' % discrete_string_vars)
print(" Date and time variables %s " )
ppt.pprint(' %s' % date_vars)
print(" ID variables %s ")
ppt.pprint(' %s' % IDcols)
print(" Target variable %s ")
ppt.pprint(' %s' % depVar)
elif verbose==1 and len(cols_list) > max_cols_analyzed:
print(' Total columns > %d, too numerous to list.' %max_cols_analyzed)
features_dict = dict([('IDcols',IDcols),('cols_delete',cols_delete),('bool_vars',bool_vars),('categorical_vars',categorical_vars),
('continuous_vars',continuous_vars),('discrete_string_vars',discrete_string_vars),
('date_vars',date_vars)])
return features_dict
#######################################################################################################
def marthas_columns(data,verbose=0):
"""
    This program is named in honor of one of my students, who came up with the idea for it.
It's a neat way of printing data types and information compared to the boring describe() function in Pandas.
"""
data = data[:]
print('Data Set Shape: %d rows, %d cols' % data.shape)
if data.shape[1] > 30:
print('Too many columns to print')
else:
if verbose==1:
print('Data Set columns info:')
for col in data.columns:
print('* %s: %d nulls, %d unique vals, most common: %s' % (
col,
data[col].isnull().sum(),
data[col].nunique(),
data[col].value_counts().head(2).to_dict()
))
print('--------------------------------------------------------------------')
################################################################################
######### NEW And FAST WAY to CLASSIFY COLUMNS IN A DATA SET #######
################################################################################
def classify_columns(df_preds, verbose=0):
"""
Takes a dataframe containing only predictors to be classified into various types.
DO NOT SEND IN A TARGET COLUMN since it will try to include that into various columns.
Returns a data frame containing columns and the class it belongs to such as numeric,
categorical, date or id column, boolean, nlp, discrete_string and cols to delete...
####### Returns a dictionary with 10 kinds of vars like the following: # continuous_vars,int_vars
# cat_vars,factor_vars, bool_vars,discrete_string_vars,nlp_vars,date_vars,id_vars,cols_delete
"""
train = copy.deepcopy(df_preds)
#### If there are 30 chars are more in a discrete_string_var, it is then considered an NLP variable
max_nlp_char_size = 30
max_cols_to_print = 30
print('############## C L A S S I F Y I N G V A R I A B L E S ####################')
print('Classifying variables in data set...')
    #### Cat_Limit defines the max number of categories a column can have to be called a categorical column
cat_limit = 35
float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###
def add(a,b):
return a+b
sum_all_cols = dict()
orig_cols_total = train.shape[1]
#Types of columns
cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1
) | (train[col].isnull().sum()/len(train) >= 0.90)]
train = train[left_subtract(list(train),cols_delete)]
var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(
columns={0:'type_of_column'})
sum_all_cols['cols_delete'] = cols_delete
var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']
and len(train[x['index']].value_counts()) == 2 else 0, axis=1)
string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])
sum_all_cols['string_bool_vars'] = string_bool_vars
var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16','int32','int64',
'float16','float32','float64'] and len(
train[x['index']].value_counts()) == 2 else 0, axis=1)
num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])
sum_all_cols['num_bool_vars'] = num_bool_vars
###### This is where we take all Object vars and split them into diff kinds ###
discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[
'index'] not in string_bool_vars+cols_delete else 0,axis=1)
######### This is where we figure out whether a string var is nlp or discrete_string var ###
var_df['nlp_strings'] = 0
var_df['discrete_strings'] = 0
var_df['cat'] = 0
var_df['id_col'] = 0
discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()
if len(var_df.loc[discrete_or_nlp==1]) != 0:
for col in discrete_or_nlp_vars:
#### first fill empty or missing vals since it will blowup ###
train[col] = train[col].fillna(' ')
if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(
) >= max_nlp_char_size and len(train[col].value_counts()
) <= int(0.9*len(train)) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'nlp_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) <= int(0.9*len(train)) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'discrete_strings'] = 1
elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()
) == len(train) and col not in string_bool_vars:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
var_df.loc[var_df['index']==col,'cat'] = 1
nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])
sum_all_cols['nlp_vars'] = nlp_vars
discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])
sum_all_cols['discrete_string_vars'] = discrete_string_vars
###### This happens only if a string column happens to be an ID column #######
#### DO NOT Add this to ID_VARS yet. It will be done later.. Dont change it easily...
#### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###
var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,
axis=1)
factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])
sum_all_cols['factor_vars'] = factor_vars
########################################################################
date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,
np.uint16, np.uint32, np.uint64,
'int8','int16',
'int32','int64'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
######### This is where we figure out whether a numeric col is date or id variable ###
var_df['int'] = 0
var_df['date_time'] = 0
### if a particular column is date-time type, now set it as a date time variable ##
var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[
'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,
axis=1)
### this is where we save them as date time variables ###
if len(var_df.loc[date_or_id==1]) != 0:
for col in var_df.loc[date_or_id==1]['index'].values.tolist():
if len(train[col].value_counts()) == len(train):
if train[col].min() < 1900 or train[col].max() > 2050:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
var_df.loc[var_df['index']==col,'id_col'] = 1
else:
if train[col].min() < 1900 or train[col].max() > 2050:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
try:
pd.to_datetime(train[col],infer_datetime_format=True)
var_df.loc[var_df['index']==col,'date_time'] = 1
except:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'int'] = 1
else:
pass
int_vars = list(var_df[(var_df['int'] ==1)]['index'])
date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])
id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])
sum_all_cols['int_vars'] = int_vars
copy_date_vars = copy.deepcopy(date_vars)
for date_var in copy_date_vars:
#### This test is to make sure sure date vars are actually date vars
try:
pd.to_datetime(train[date_var],infer_datetime_format=True)
except:
##### if not a date var, then just add it to delete it from processing
cols_delete.append(date_var)
date_vars.remove(date_var)
sum_all_cols['date_vars'] = date_vars
sum_all_cols['id_vars'] = id_vars
sum_all_cols['cols_delete'] = cols_delete
## This is an EXTREMELY complicated logic for cat vars. Don't change it unless you test it many times!
var_df['numeric'] = 0
float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',
'float32','float64'] else 0,
axis=1)
if len(var_df.loc[float_or_cat == 1]) > 0:
for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():
if len(train[col].value_counts()) > 2 and len(train[col].value_counts()
) <= float_limit and len(train[col].value_counts()) <= len(train):
var_df.loc[var_df['index']==col,'cat'] = 1
else:
if col not in num_bool_vars:
var_df.loc[var_df['index']==col,'numeric'] = 1
cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])
continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])
######## V E R Y I M P O R T A N T ###################################################
    ##### There are a couple of extra tests you need to do to remove aberrations in cat_vars ###
cat_vars_copy = copy.deepcopy(cat_vars)
for cat in cat_vars_copy:
if df_preds[cat].dtype==float:
continuous_vars.append(cat)
cat_vars.remove(cat)
var_df.loc[var_df['index']==cat,'cat'] = 0
var_df.loc[var_df['index']==cat,'numeric'] = 1
elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:
id_vars.append(cat)
cat_vars.remove(cat)
var_df.loc[var_df['index']==cat,'cat'] = 0
var_df.loc[var_df['index']==cat,'id_col'] = 1
sum_all_cols['cat_vars'] = cat_vars
sum_all_cols['continuous_vars'] = continuous_vars
sum_all_cols['id_vars'] = id_vars
    ###### This is where you consolidate the numbers ###########
var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))
for col, sumval in var_dict_sum.items():
if sumval == 0:
print('%s of type=%s is not classified' %(col,train[col].dtype))
elif sumval > 1:
            print('%s of type=%s is classified into more than one type' %(col,train[col].dtype))
else:
pass
############### This is where you print all the types of variables ##############
####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,
### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete
if verbose == 1:
print(" Number of Numeric Columns = ", len(continuous_vars))
print(" Number of Integer-Categorical Columns = ", len(int_vars))
print(" Number of String-Categorical Columns = ", len(cat_vars))
print(" Number of Factor-Categorical Columns = ", len(factor_vars))
print(" Number of String-Boolean Columns = ", len(string_bool_vars))
print(" Number of Numeric-Boolean Columns = ", len(num_bool_vars))
print(" Number of Discrete String Columns = ", len(discrete_string_vars))
print(" Number of NLP String Columns = ", len(nlp_vars))
print(" Number of Date Time Columns = ", len(date_vars))
print(" Number of ID Columns = ", len(id_vars))
print(" Number of Columns to Delete = ", len(cols_delete))
if verbose == 2:
marthas_columns(df_preds,verbose=1)
print(" Numeric Columns: %s" %continuous_vars[:max_cols_to_print])
print(" Integer-Categorical Columns: %s" %int_vars[:max_cols_to_print])
print(" String-Categorical Columns: %s" %cat_vars[:max_cols_to_print])
print(" Factor-Categorical Columns: %s" %factor_vars[:max_cols_to_print])
print(" String-Boolean Columns: %s" %string_bool_vars[:max_cols_to_print])
print(" Numeric-Boolean Columns: %s" %num_bool_vars[:max_cols_to_print])
print(" Discrete String Columns: %s" %discrete_string_vars[:max_cols_to_print])
print(" NLP text Columns: %s" %nlp_vars[:max_cols_to_print])
print(" Date Time Columns: %s" %date_vars[:max_cols_to_print])
print(" ID Columns: %s" %id_vars[:max_cols_to_print])
print(" Columns that will not be considered in modeling: %s" %cols_delete[:max_cols_to_print])
##### now collect all the column types and column names into a single dictionary to return!
len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])
if len_sum_all_cols == orig_cols_total:
print(' %d Predictors classified...' %orig_cols_total)
print(' This does not include the Target column(s)')
else:
print('No of columns classified %d does not match %d total cols. Continuing...' %(
len_sum_all_cols, orig_cols_total))
ls = sum_all_cols.values()
flat_list = [item for sublist in ls for item in sublist]
if len(left_subtract(list(train),flat_list)) == 0:
print(' Missing columns = None')
else:
print(' Missing columns = %s' %left_subtract(list(train),flat_list))
return sum_all_cols
#################################################################################
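###### A minimal usage sketch of classify_columns on a toy frame (an illustrative
###### addition, not original featurewiz code). It assumes featurewiz's left_subtract
###### helper, defined elsewhere in the library, is available at call time.
def _demo_classify_columns():
    demo_df = pd.DataFrame({
        'user_id': range(100),                    # all-unique ints -> id_vars
        'city': ['NY', 'LA', 'SF', 'NY'] * 25,    # low-cardinality strings -> cat_vars
        'income': np.random.rand(100) * 1e5,      # many distinct floats -> continuous_vars
    })
    col_types = classify_columns(demo_df, verbose=1)
    return col_types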
from collections import Counter
import time
from sklearn.feature_selection import chi2, mutual_info_regression, mutual_info_classif
from sklearn.feature_selection import SelectKBest
##################################################################################
def load_file_dataframe(dataname, sep=",", header=0, verbose=0):
start_time = time.time()
########################### This is where we load file or data frame ###############
if isinstance(dataname,str):
#### this means they have given file name as a string to load the file #####
if dataname != '' and dataname.endswith(('csv')):
codex = ['utf-8', 'iso-8859-1', 'cp1252', 'latin1']
for code in codex:
try:
dfte = pd.read_csv(dataname,sep=sep,index_col=None,encoding=code)
print('Encoder %s chosen to read CSV file' %code)
print('Shape of your Data Set loaded: %s' %(dfte.shape,))
return dfte
except:
print('Encoding codex %s does not work for this file' %code)
continue
elif dataname.endswith(('xlsx','xls','txt')):
#### It's very important to get header rows in Excel since people put headers anywhere in Excel#
dfte = pd.read_excel(dataname,header=header)
print('Shape of your Data Set loaded: %s' %(dfte.shape,))
return dfte
else:
            print('File could not be loaded. Only csv, xlsx, xls and txt files are supported.')
return
if isinstance(dataname,pd.DataFrame):
#### this means they have given a dataframe name to use directly in processing #####
dfte = copy.deepcopy(dataname)
return dfte
else:
print('Dataname input must be a filename with path to that file or a Dataframe')
return
##################################################################################
# Removes duplicates from a list to return unique values - USED ONLY ONCE
def find_remove_duplicates(values):
output = []
seen = set()
for value in values:
if value not in seen:
output.append(value)
seen.add(value)
return output
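# A minimal usage sketch (illustrative addition): unlike set(), the order of
# first appearance is preserved, which matters for reproducible column ordering.
def _demo_find_remove_duplicates():
    assert find_remove_duplicates(['a', 'b', 'a', 'c', 'b']) == ['a', 'b', 'c']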
#################################################################################
#### Regression or Classification type problem
def analyze_problem_type(train, target, verbose=0) :
target = copy.deepcopy(target)
cat_limit = 30 ### this determines the number of categories to name integers as classification ##
float_limit = 15 ### this limits the number of float variable categories for it to become cat var
if isinstance(target, str):
target = [target]
if len(target) == 1:
targ = target[0]
model_label = 'Single_Label'
else:
targ = target[0]
model_label = 'Multi_Label'
#### This is where you detect what kind of problem it is #################
if train[targ].dtype in ['int64', 'int32','int16']:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 2 and len(train[targ].unique()) <= cat_limit:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
elif train[targ].dtype in ['float']:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
elif len(train[targ].unique()) > 2 and len(train[targ].unique()) <= float_limit:
model_class = 'Multi_Classification'
else:
model_class = 'Regression'
else:
if len(train[targ].unique()) <= 2:
model_class = 'Binary_Classification'
else:
model_class = 'Multi_Classification'
########### print this for the start of next step ###########
if verbose <= 1:
print('''################ %s %s Feature Selection Started #####################''' %(
model_label,model_class))
return model_class
#####################################################################################
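# A minimal usage sketch (illustrative addition): two distinct integer labels
# are detected as binary classification.
def _demo_analyze_problem_type():
    demo = pd.DataFrame({'target': [0, 1, 0, 1], 'x': [1.0, 2.0, 3.0, 4.0]})
    assert analyze_problem_type(demo, 'target') == 'Binary_Classification'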
from collections import defaultdict
from collections import OrderedDict
import time
def return_dictionary_list(lst_of_tuples):
""" Returns a dictionary of lists if you send in a list of Tuples"""
orDict = defaultdict(list)
# iterating over list of tuples
for key, val in lst_of_tuples:
orDict[key].append(val)
return orDict
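# A minimal usage sketch (illustrative addition): values sharing a key are
# grouped into a list, preserving their original order.
def _demo_return_dictionary_list():
    pairs = [('a', 1), ('b', 2), ('a', 3)]
    assert dict(return_dictionary_list(pairs)) == {'a': [1, 3], 'b': [2]}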
##################################################################################
def remove_variables_using_fast_correlation(df, numvars, modeltype, target,
corr_limit = 0.70,verbose=0):
"""
#### THIS METHOD IS KNOWN AS SULOV METHOD in HONOR OF my mother SULOCHANA SESHADRI #######
##### SULOV stands for Searching Uncorrelated List Of Variables ############
This highly efficient method removes variables that are highly correlated using a series of
pair-wise correlation knockout rounds. It is extremely fast and hence can work on thousands
of variables in less than a minute, even on a laptop. You need to send in a list of numeric
variables and that's all! The method defines high Correlation as anything over 0.70 (absolute)
but this can be changed. If two variables have absolute correlation higher than this, they
will be marked, and using a process of elimination, one of them will get knocked out:
    To decide which of a correlated pair to keep, we rank the correlated variables by their
    mutual information score (MIS) against the target. Walking down this ranked list, each
    time we select a variable we knock out every other variable highly correlated to it,
    then move on to the next survivor. Finally we are left with uncorrelated variables
    that also rank highly in mutual information.
############## YOU MUST INCLUDE THE ABOVE MESSAGE IF YOU COPY THIS CODE IN YOUR LIBRARY #####
"""
import copy
target = copy.deepcopy(target)
print('Searching for highly correlated variables from %d variables using SULOV method' %len(numvars))
print('##### SULOV : Searching for Uncorrelated List Of Variables (takes time...) ############')
correlation_dataframe = df[numvars].corr().abs().astype(np.float16)
######### This is how you create a dictionary of which var is highly correlated to a list of vars ####
corr_values = correlation_dataframe.values
col_index = correlation_dataframe.columns.tolist()
index_triupper = list(zip(np.triu_indices_from(corr_values,k=1)[0],np.triu_indices_from(
corr_values,k=1)[1]))
high_corr_index_list = [x for x in np.argwhere(abs(corr_values[np.triu_indices(len(corr_values), k = 1)])>=corr_limit)]
low_corr_index_list = [x for x in np.argwhere(abs(corr_values[np.triu_indices(len(corr_values), k = 1)])<corr_limit)]
tuple_list = [y for y in [index_triupper[x[0]] for x in high_corr_index_list]]
correlated_pair = [(col_index[tuple[0]],col_index[tuple[1]]) for tuple in tuple_list]
corr_pair_dict = dict(return_dictionary_list(correlated_pair))
keys_in_dict = list(corr_pair_dict.keys())
reverse_correlated_pair = [(y,x) for (x,y) in correlated_pair]
reverse_corr_pair_dict = dict(return_dictionary_list(reverse_correlated_pair))
for key, val in reverse_corr_pair_dict.items():
if key in keys_in_dict:
if len(key) > 1:
corr_pair_dict[key] += val
else:
corr_pair_dict[key] = val
#### corr_pair_dict is used later to make the network diagram to see which vars are correlated to which
# Selecting upper triangle of correlation matrix ## this is a fast way to find highly correlated vars
    upper_tri = correlation_dataframe.where(np.triu(np.ones(correlation_dataframe.shape),
                                    k=1).astype(bool))
empty_df = upper_tri[abs(upper_tri)>corr_limit]
### if none of the variables are highly correlated, you can skip this whole drawing
if empty_df.isnull().all().all():
print(' No highly correlated variables in data set to remove. All selected...')
return numvars
#### It's important to find the highly correlated features first #############
    lower_tri = correlation_dataframe.where(np.tril(np.ones(correlation_dataframe.shape),
                                    k=-1).astype(bool))
lower_df = lower_tri[abs(lower_tri)>corr_limit]
corr_list = empty_df.columns[[not(empty_df[x].isnull().all()) for x in list(empty_df)]].tolist(
)+lower_df.columns[[not(lower_df[x].isnull().all()) for x in list(lower_df)]].tolist()
corr_list = find_remove_duplicates(corr_list)
###### This is for ordering the variables in the highest to lowest importance to target ###
if len(corr_list) == 0:
final_list = list(correlation_dataframe)
print('Selecting all (%d) variables since none of them are highly correlated...' %len(numvars))
return numvars
else:
if isinstance(target, list):
target = target[0]
max_feats = len(corr_list)
if modeltype == 'Regression':
sel_function = mutual_info_regression
fs = SelectKBest(score_func=sel_function, k=max_feats)
else:
sel_function = mutual_info_classif
fs = SelectKBest(score_func=sel_function, k=max_feats)
try:
fs.fit(df[corr_list].astype(np.float16), df[target])
mutual_info = dict(zip(corr_list,fs.scores_))
#### The first variable in list has the highest correlation to the target variable ###
sorted_by_mutual_info =[key for (key,val) in sorted(mutual_info.items(), key=lambda kv: kv[1],reverse=True)]
##### Now we select the final list of correlated variables ###########
selected_corr_list = []
#### You have to make multiple copies of this sorted list since it is iterated many times ####
orig_sorted = copy.deepcopy(sorted_by_mutual_info)
copy_sorted = copy.deepcopy(sorted_by_mutual_info)
copy_pair = copy.deepcopy(corr_pair_dict)
#### select each variable by the highest mutual info and see what vars are correlated to it
for each_corr_name in copy_sorted:
### add the selected var to the selected_corr_list
selected_corr_list.append(each_corr_name)
for each_remove in copy_pair[each_corr_name]:
#### Now remove each variable that is highly correlated to the selected variable
if each_remove in copy_sorted:
copy_sorted.remove(each_remove)
##### Now we combine the uncorrelated list to the selected correlated list above
rem_col_list = left_subtract(list(correlation_dataframe),corr_list)
final_list = rem_col_list + selected_corr_list
removed_cols = left_subtract(numvars, final_list)
except:
print(' SULOV Method crashing due to memory error, trying alternative simpler method...')
#### Dropping highly correlated Features fast using simple linear correlation ###
            removed_cols = remove_highly_correlated_vars_fast(df[numvars],corr_limit)
final_list = left_subtract(numvars, removed_cols)
if len(removed_cols) > 0:
print(' Removing (%d) highly correlated variables:' %(len(removed_cols)))
if len(removed_cols) <= 30:
print(' %s' %removed_cols)
if len(final_list) <= 30:
print(' Following (%d) vars selected: %s' %(len(final_list),final_list))
############## D R A W C O R R E L A T I O N N E T W O R K ##################
selected = copy.deepcopy(final_list)
try:
import networkx as nx
except:
print(' Python networkx library not installed. Install it for feature selection visualization.')
        return final_list
#### Now start building the graph ###################
gf = nx.Graph()
### the mutual info score gives the size of the bubble ###
multiplier = 2100
for each in orig_sorted:
gf.add_node(each, size=int(max(1,mutual_info[each]*multiplier)))
######### This is where you calculate the size of each node to draw
sizes = [mutual_info[x]*multiplier for x in list(gf.nodes())]
#### The sizes of the bubbles for each node is determined by its mutual information score value
corr = df[corr_list].corr()
high_corr = corr[abs(corr)>corr_limit]
## high_corr is the dataframe of a few variables that are highly correlated to each other
combos = combinations(corr_list,2)
### this gives the strength of correlation between 2 nodes ##
multiplier = 20
for (var1, var2) in combos:
            if np.isnan(high_corr.loc[var1, var2]):
                pass
            else:
                gf.add_edge(var1, var2, weight=multiplier*high_corr.loc[var1, var2])
        ### The rest of the original network-drawing code is truncated in this source;
        ### the selected feature list is what callers of SULOV actually need.
        return final_list
import math
from mpl_toolkits.mplot3d import Axes3D, axes3d
import warnings
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
import itertools
from src import constants
from src.my_utils.constant_class import *
from src.my_utils.my_math.line import distance_btw_two_point
class PotentialType:
Repulsive_potential = "Repulsive_potential"
Attractive_potential = "Attractive_potential"
Camera_potential_steps = "camera_potential_steps"
Camera_potential_quadratic = "camera_potential_quadratic"
class PotentialShape:
Circular = "circular"
Angle = "angle"
Linear_X_direction = "Linear X unbound"
Linear_X_direction_bound = "Linear X bound"
Linear_Y_direction = "Linear Y unbound"
Linear_Y_direction_bound = "Linear Y bound"
class HeatMaps:
"""
Maps are composed of multiple points
[(x,y,data_saved-controller-xi,rho_0,PotentialType),...]
x and y are the coordinate of the point where we set the potential function
data_saved-controller-xi is a factor between 0 and 1, to give more or less importance to the point
rho_0 is the radius in which the potential is determined
PotentialType is the function we want to use
If the potential are set correctly and are not to close from each other than the map gives always
a value between 0 and 1.
"""
"""All cam field"""
def HEAT_MAP_INSIDE_OF_FIELD():
return [(0,0,0,1,1,1,1000,PotentialType.Camera_potential_steps)]
"""For one target"""
def HEAT_MAP_ONE_TARGET_CENTER(field_depth):
return [(constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth,0, 0,1,1,1,1,PotentialType.Camera_potential_quadratic)]
"""For two targets"""
def HEAT_MAP_TWO_TARGET_CENTER(field_depth,beta):
x = constants.DISTANCE_TO_KEEP_FROM_TARGET*field_depth * math.cos(beta / 4)
y = 1.5*constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.sin(beta / 4)
return [(x, y,0,1,2,1,2,PotentialType.Camera_potential_quadratic), (x,-y,0,1,2,1,2,PotentialType.Camera_potential_quadratic)]
def HEAT_MAP_TWO_TARGET_FAR(field_depth,beta,side=1):
return [(0.8*field_depth*math.cos(beta/ 4),side*0.3*field_depth * math.sin(beta / 4), side*0, 2, 1, 1, 1.5,PotentialType.Camera_potential_quadratic),
(0.3*field_depth*math.cos(beta/ 4),side*-0.1*field_depth * math.sin(beta/ 4),side*55, 1, 15, 1, 0.75,PotentialType.Camera_potential_quadratic)]
"""For three targets"""
def HEAT_MAP_THREE_TARGET(field_depth,beta):
x = constants.DISTANCE_TO_KEEP_FROM_TARGET*field_depth * math.cos(beta / 4)
y = 1.5*constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.sin(beta / 4)
return [(x, y,0,1,1,1,0.5,PotentialType.Camera_potential_steps),
(x,-y,0,1,1,1,0.5,PotentialType.Camera_potential_steps),
(x+2, 0, 0, 1, 1, 1, 0.5, PotentialType.Camera_potential_steps)]
def HEAT_MAP_TWO_TARGET_OVERLAP(field_depth, beta):
x = constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.cos(beta / 4)
y = 1.5 * constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.sin(beta / 4)
return [(x, y, 0, 1, 2, 1, 2.5, PotentialType.Camera_potential_quadratic),
(x, -y, 0, 1, 2, 1, 2.5, PotentialType.Camera_potential_quadratic)]
def rotate_vector_field_angle(angle, X, Y):
norm = np.float_power(np.square(X) + np.square(Y), 0.5)
old_angle = np.arctan2(Y, X)
X = norm * np.cos(angle + old_angle)
Y = norm * np.sin(angle + old_angle)
return X, Y
def rotate_map_from_angle_alpha(angle, x, y, x_mean, y_mean):
x_offset = x - x_mean
y_offset = y - y_mean
x_rotate = math.cos(angle) * x_offset + math.sin(angle) * y_offset
y_rotate = -math.sin(angle) * x_offset + math.cos(angle) * y_offset
return x_rotate, y_rotate
def unrotate_map_from_angle_alpha(angle, x, y, x_mean, y_mean):
x_rotate = math.cos(angle) * x + math.sin(angle) * y
y_rotate = -math.sin(angle) * x + math.cos(angle) * y
x = x_rotate + x_mean
y = y_rotate + y_mean
return x, y
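# A minimal check (illustrative addition): rotating into the pair-aligned frame
# and back with the opposite angle recovers the original point, which is exactly
# the pattern the pair-wise barrier code below relies on.
def _demo_rotation_round_trip():
    x, y = 3.0, -2.0
    xr, yr = rotate_map_from_angle_alpha(math.pi / 6, x, y, 1.0, 1.0)
    x2, y2 = unrotate_map_from_angle_alpha(-math.pi / 6, xr, yr, 1.0, 1.0)
    assert abs(x2 - x) < 1e-9 and abs(y2 - y) < 1e-9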
def define_potential_shape(shape, X=None, mean_x=None, var_x=None, Y=None, mean_y=None, var_y=None, X_min=None,
X_max=None, Y_min=None, Y_max=None, angle_min=None, angle_max=None):
if shape == PotentialShape.Circular and X is not None and Y is not None:
distances = np.power(np.square(X - mean_x) / var_x + np.square(Y - mean_y) / var_y, 0.5)
angle = np.arctan2(Y - mean_y, X - mean_x)
elif shape == PotentialShape.Angle and X is not None and Y is not None and angle_min is not None and angle_max is not None:
distances = np.power(np.square(X - mean_x) / var_x + np.square(Y - mean_y) / var_y, 0.5)
angle = np.arctan2(Y - mean_y, X - mean_x)
distances = np.where(angle < angle_max, distances, -1)
distances = np.where(angle > angle_min, distances, -1)
elif shape == PotentialShape.Linear_X_direction and Y is not None:
distances = np.square((Y - mean_y) / var_y)
angle = np.arctan2(Y - mean_y, 0)
elif shape == PotentialShape.Linear_Y_direction and X is not None:
distances = np.square((X - mean_x) / var_x)
angle = np.arctan2(0, X - mean_x)
else:
print("define_potential_shape : choice not found or values not set correctly")
        if X is not None:
            distances = np.zeros(np.shape(X))
            angle = distances
        elif Y is not None:
distances = np.zeros(np.shape(Y))
angle = distances
else:
distances = 0
angle = distances
return distances, angle
def define_potential_type(field_type, distances, xi=None, eta=None, rho_0=None):
if field_type == PotentialType.Repulsive_potential and eta is not None and rho_0 is not None:
distances = np.where(distances > 0.001 * rho_0, distances, rho_0)
distances = np.where(distances <= rho_0, distances, rho_0)
return 0.5 * eta * np.square(1 / distances - 1 / rho_0)
elif field_type == PotentialType.Attractive_potential and xi is not None:
distances = np.where(distances > -1, distances, 0)
return 0.5 * xi * np.square(distances)
elif field_type == PotentialType.Camera_potential_steps and xi is not None:
function = np.zeros(np.shape(distances))
function = np.where(distances > rho_0, function, xi)
return function
elif field_type == PotentialType.Camera_potential_quadratic and xi is not None:
function = np.zeros(np.shape(distances))
function = np.where(distances > rho_0,function,(xi*(np.square(rho_0)-np.square(distances))/np.square(rho_0)))
return function
else:
print("error potential type note found")
def define_grad_potential_type(choix, distances, xi=None, eta=None, rho_0=None):
try:
if choix == PotentialType.Repulsive_potential:
distances = np.where(distances > 0.001 * rho_0, distances, rho_0)
distances = np.where(distances <= rho_0, distances, rho_0)
return 0.5 * eta * (1 / distances - 1 / rho_0) * np.square(1 / rho_0)
elif choix == PotentialType.Attractive_potential:
return -xi * distances
else:
print("error potential type not found")
except ZeroDivisionError:
warnings.warn("divition by rho_0=0 but don't care for now")
return 0
def compute_part_of_potential_field(field_type, shape, X=None, Y=None, mean_x=None, mean_y=None, var_x=None, var_y=None,
X_min=None, X_max=None, Y_min=None, Y_max=None, angle_min=None, angle_max=None,
xi=None, eta=None, rho_0=None):
distances, angle = define_potential_shape(shape=shape, X=X, Y=Y, mean_x=mean_x, mean_y=mean_y, var_x=var_x,
var_y=var_y, X_min=X_min, X_max=X_max, Y_min=Y_min, Y_max=Y_max,
angle_min=angle_min, angle_max=angle_max)
potential = define_potential_type(field_type, distances, xi=xi, eta=eta, rho_0=rho_0)
return potential
def compute_part_of_grad_potential_field(field_type, shape, X=None, Y=None, mean_x=None, mean_y=None, var_x=None,
var_y=None,X_min=None, X_max=None, Y_min=None, Y_max=None, angle_min=None, angle_max=None,
xi=None, eta=None, rho_0=None):
distances, angle = define_potential_shape(shape=shape, X=X, Y=Y, mean_x=mean_x, mean_y=mean_y, var_x=var_x,
var_y=var_y, X_min=X_min, X_max=X_max, Y_min=Y_min, Y_max=Y_max,
angle_min=angle_min, angle_max=angle_max)
grad_x = define_grad_potential_type(field_type, distances, xi=xi, eta=eta, rho_0=rho_0) * np.cos(angle)
grad_y = define_grad_potential_type(field_type, distances, xi=xi, eta=eta, rho_0=rho_0) * np.sin(angle)
return grad_x, grad_y
def compute_grad_potential_for_a_given_list(target_list, X, Y, field_type, barrier_type, xi=None, eta=None, rho_0=None):
force_x = np.zeros(np.shape(X))
force_y = np.zeros(np.shape(X))
rho_0_None = False
if rho_0 is None:
rho_0_None = True
if target_list == []:
return force_x, force_y
elif len(target_list) == 1:
x, y, radius = target_list[0]
if rho_0_None:
rho_0 = radius
delta_force_x, delta_force_y = compute_part_of_grad_potential_field(
field_type=field_type, shape=PotentialShape.Circular, X=X, mean_x=x, var_x=1,
Y=Y, mean_y=y, var_y=1, xi=xi, eta=eta, rho_0=rho_0)
force_x += delta_force_x
force_y += delta_force_y
return force_x, force_y
else:
for target in target_list:
x, y, radius = target
if rho_0_None:
rho_0 = radius
delta_force_x, delta_force_y = compute_part_of_grad_potential_field(
field_type=field_type, shape=PotentialShape.Circular, X=X, mean_x=x, var_x=1,
Y=Y, mean_y=y, var_y=1, xi=xi, eta=eta, rho_0=rho_0)
force_x += delta_force_x
force_y += delta_force_y
for targets in itertools.combinations(target_list, 2):
target1, target2 = targets
x1, y1, radius1 = target1
x2, y2, radius2 = target2
if rho_0_None:
rho_0 = max(radius1, radius2)
"""First rotation to place x-axis between the to targets"""
x_mean = (x1 + x2) / 2
y_mean = (y1 + y2) / 2
delta_x = x1 - x2
delta_y = y1 - y2
distance = distance_btw_two_point(x1, y1, x2, y2)
angle = math.atan2(delta_y, delta_x)
X, Y = rotate_map_from_angle_alpha(angle, X, Y, x_mean, y_mean)
"""Computation from the vectors"""
delta_force_x, delta_force_y = np.zeros(np.shape(force_x)), np.zeros(np.shape(force_y))
if barrier_type == PotentialBarrier.Hard:
delta_force_x, delta_force_y = compute_part_of_grad_potential_field(field_type=field_type,
shape=PotentialShape.Linear_X_direction,
Y=Y, mean_y=0, var_y=1, xi=xi,
eta=eta,
rho_0=rho_0)
elif barrier_type == PotentialBarrier.Smooth:
delta_force_x, delta_force_y = compute_part_of_grad_potential_field(
field_type=field_type, shape=PotentialShape.Circular, X=X, mean_x=0,
var_x=1 + constants.COEFF_VAR_X * distance,Y=Y, mean_y=0, var_y=1 + constants.COEFF_VAR_Y * distance,
xi=xi, eta=eta, rho_0=rho_0)
elif barrier_type == PotentialBarrier.Combine_repulse:
delta_force_x_hard, delta_force_y_hard = compute_part_of_grad_potential_field(field_type=field_type,
shape=PotentialShape.Linear_X_direction,
Y=Y, mean_y=0, var_y=1,
xi=xi,
eta=eta,
rho_0=rho_0)
delta_force_x_smooth, delta_force_y_smooth = compute_part_of_grad_potential_field(
field_type=field_type, shape=PotentialShape.Circular, X=X, mean_x=0,
var_x=1 + constants.COEFF_VAR_X * distance,
Y=Y, mean_y=0, var_y=1 + constants.COEFF_VAR_Y * distance, xi=xi,
eta=eta, rho_0=rho_0)
delta_force_x, delta_force_y = delta_force_x_hard * (
1 - constants.COMBINE_MODE_PROP) + delta_force_x_smooth * constants.COMBINE_MODE_PROP, delta_force_y_hard * (
1 - constants.COMBINE_MODE_PROP) + delta_force_y_smooth * constants.COMBINE_MODE_PROP
elif barrier_type == PotentialBarrier.Combine_attract:
delta_force_x_hard, delta_force_y_hard = compute_part_of_grad_potential_field(field_type=field_type,
shape=PotentialShape.Linear_X_direction,
Y=Y, mean_y=0, var_y=1,
xi=xi,
eta=eta,
rho_0=rho_0)
delta_force_x_smooth, delta_force_y_smooth = compute_part_of_grad_potential_field(
field_type=field_type, shape=PotentialShape.Circular, X=X, mean_x=0,
var_x=1 + constants.COEFF_VAR_X * distance,
Y=Y, mean_y=0, var_y=1 + constants.COEFF_VAR_Y * distance, xi=xi,
eta=eta, rho_0=rho_0)
delta_force_x_smooth = -delta_force_x_smooth
delta_force_y_smooth = -delta_force_y_smooth
delta_force_x, delta_force_y = delta_force_x_hard * (
1 - constants.COMBINE_MODE_PROP) + delta_force_x_smooth * constants.COMBINE_MODE_PROP, delta_force_y_hard * (
1 - constants.COMBINE_MODE_PROP) + delta_force_y_smooth * constants.COMBINE_MODE_PROP
elif barrier_type == PotentialBarrier.Not_use:
pass
delta_force_x_rotate, delta_force_y_rotate = rotate_vector_field_angle(angle, delta_force_x,
delta_force_y)
force_x += delta_force_x_rotate
force_y += delta_force_y_rotate
"Back to orignal ref"
X, Y = unrotate_map_from_angle_alpha(-angle, X, Y, x_mean, y_mean)
return force_x, force_y
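# A minimal usage sketch (illustrative addition): the attractive gradient of a
# single target at the origin with radius 1, sampled on a grid. With a single
# target the barrier_type branch is never reached, so None is acceptable here.
def _demo_single_target_gradient():
    X, Y = np.meshgrid(np.linspace(-5.0, 5.0, 21), np.linspace(-5.0, 5.0, 21))
    fx, fy = compute_grad_potential_for_a_given_list(
        [(0.0, 0.0, 1.0)], X, Y,
        PotentialType.Attractive_potential, barrier_type=None, xi=1.0)
    # The attractive force points toward the target: at x > 0 the x-force is negative.
    assert fx[10, 20] < 0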
def compute_potential_for_a_given_list(target_list, X, Y, field_type, barrier_type, xi=None, eta=None, rho_0=None):
potential_field = np.zeros(np.shape(X))
rho_0_None = False
if rho_0 is None:
rho_0_None = True
if target_list == []:
return potential_field
elif len(target_list) == 1:
x, y, radius = target_list[0]
if rho_0_None:
rho_0 = radius
potential_field += compute_part_of_potential_field(field_type=field_type,
shape=PotentialShape.Circular,
X=X, mean_x=x, var_x=1,
Y=Y, mean_y=y, var_y=1,
xi=xi, eta=eta, rho_0=rho_0)
return potential_field
else:
for target in target_list:
x, y, radius = target
if rho_0_None:
rho_0 = radius
potential_field += compute_part_of_potential_field(field_type=field_type,
shape=PotentialShape.Circular,
X=X, mean_x=x, var_x=1,
Y=Y, mean_y=y, var_y=1,
xi=xi, eta=eta, rho_0=rho_0)
for targets in itertools.combinations(target_list, 2):
target1, target2 = targets
x1, y1, radius1 = target1
x2, y2, radius2 = target2
if rho_0_None:
rho_0 = max(radius1, radius2)
"""First rotation to place x-axis between the to targets"""
x_mean = (x1 + x2) / 2
y_mean = (y1 + y2) / 2
delta_x = x1 - x2
delta_y = y1 - y2
distance = distance_btw_two_point(x1, y1, x2, y2)
angle = math.atan2(delta_y, delta_x)
X, Y = rotate_map_from_angle_alpha(angle, X, Y, x_mean, y_mean)
"""Computation from the field"""
if barrier_type == PotentialBarrier.Hard:
potential_field += compute_part_of_potential_field(field_type=field_type,
shape=PotentialShape.Linear_X_direction,
Y=Y, mean_y=0, var_y=1, xi=xi, eta=eta,
rho_0=rho_0)
elif barrier_type == PotentialBarrier.Smooth:
potential_field += compute_part_of_potential_field(field_type=field_type,
shape=PotentialShape.Circular,
X=X, mean_x=0,
var_x=1 + constants.COEFF_VAR_X * distance,
Y=Y, mean_y=0,
var_y=1 + constants.COEFF_VAR_Y * distance,
xi=xi, eta=eta, rho_0=rho_0)
elif barrier_type == PotentialBarrier.Combine_repulse:
potential_field += (1 - constants.COMBINE_MODE_PROP) * compute_part_of_potential_field(
field_type=field_type,
shape=PotentialShape.Linear_X_direction,
Y=Y, mean_y=0, var_y=1, xi=xi, eta=eta,
rho_0=rho_0)
potential_field += constants.COMBINE_MODE_PROP * compute_part_of_potential_field(field_type=field_type,
shape=PotentialShape.Circular,
X=X, mean_x=0,
var_x=1 + constants.COEFF_VAR_X * distance,
Y=Y, mean_y=0,
var_y=1 + constants.COEFF_VAR_Y * distance,
xi=xi, eta=eta,
rho_0=rho_0)
elif barrier_type == PotentialBarrier.Combine_attract:
potential_field += (1 - constants.COMBINE_MODE_PROP) * compute_part_of_potential_field(
field_type=field_type,
shape=PotentialShape.Linear_X_direction,
Y=Y, mean_y=0, var_y=1, xi=xi, eta=eta,
rho_0=rho_0)
potential_field -= constants.COMBINE_MODE_PROP * compute_part_of_potential_field(field_type=field_type,
shape=PotentialShape.Circular,
X=X, mean_x=0,
var_x=1 + constants.COEFF_VAR_X * distance,
Y=Y, mean_y=0,
var_y=1 + constants.COEFF_VAR_Y * distance,
xi=xi, eta=eta,
rho_0=rho_0)
elif barrier_type == PotentialBarrier.Not_use:
pass
"Back to orignal ref"
X, Y = unrotate_map_from_angle_alpha(-angle, X, Y, x_mean, y_mean)
return potential_field
def compute_potential_gradient(X, Y, target_list, obstacle_list):
attractive_force_x, attractive_force_y = compute_grad_potential_for_a_given_list(target_list, X, Y,
PotentialType.Attractive_potential,
constants.BARRIER_TYPE,
xi=constants.XI, eta=constants.ETA,
rho_0=-1)
repulsive_force_x, repulsive_force_y = compute_grad_potential_for_a_given_list(obstacle_list, X, Y,
PotentialType.Repulsive_potential,
constants.BARRIER_TYPE,
xi=constants.XI, eta=constants.ETA,
rho_0=None)
force_x = attractive_force_x + repulsive_force_x
force_y = attractive_force_y + repulsive_force_y
return force_x, force_y
def compute_potential(X, Y, target_list, obstacle_list):
attractive_potential_field = compute_potential_for_a_given_list(target_list, X, Y,
PotentialType.Attractive_potential,
constants.BARRIER_TYPE,
xi=constants.XI, eta=constants.ETA,
rho_0=-1)
repulsive_potential_field = compute_potential_for_a_given_list(obstacle_list, X, Y,
PotentialType.Repulsive_potential,
constants.BARRIER_TYPE,
xi=constants.XI, eta=constants.ETA, rho_0=None)
return attractive_potential_field + repulsive_potential_field
def compute_potential_and_potential_gradient(X_potential_field, Y_potential_field, X_vector_field, Y_vector_field,
target_list, obstacle_list):
potential_field = compute_potential(X_potential_field, Y_potential_field, target_list, obstacle_list)
force_x, force_y = compute_potential_gradient(X_vector_field, Y_vector_field, target_list, obstacle_list)
return potential_field, force_x, force_y
def compute_potential_field_cam(X, Y, n_target, beta, field_depth):
'''potential_field = compute_part_of_potential_field(PotentialType.Attractive_potential,PotentialShape.Angle, X=X_potential, mean_x=0, var_x=1, Y=Y_potential, mean_y=0, var_y=1,
                                                 angle_min=-math.radians(30), angle_max=math.radians(30), xi=10)
'''
X_potential_field, Y_potential_field = np.meshgrid(X, Y)
potential_field = np.zeros(np.shape(X_potential_field))
camera_shape, angle = define_potential_shape(PotentialShape.Angle, X=X_potential_field, mean_x=0, var_x=1,
Y=Y_potential_field,
mean_y=0, var_y=1, angle_min=-beta / 2, angle_max=beta / 2)
heat_map = []
if n_target == 1:
heat_map = HeatMaps.HEAT_MAP_ONE_TARGET_CENTER(field_depth)
heat_map = HeatMaps.HEAT_MAP_INSIDE_OF_FIELD()
elif n_target >= 2:
heat_map = HeatMaps.HEAT_MAP_TWO_TARGET_CENTER(field_depth,beta)
heat_map = HeatMaps.HEAT_MAP_TWO_TARGET_FAR(field_depth,beta,-1)
heat_map = HeatMaps.HEAT_MAP_ONE_TARGET_CENTER(field_depth) + HeatMaps.HEAT_MAP_TWO_TARGET_FAR(field_depth,beta,-1)
#heat_map = HeatMaps.HEAT_MAP_THREE_TARGET(field_depth,beta)
for heat_point in heat_map:
x, y,angle,var_x,var_y, xi,rho,potential_type = heat_point
X, Y = rotate_map_from_angle_alpha(math.radians(angle), X_potential_field, Y_potential_field, x, y)
potential_basis = compute_part_of_potential_field(field_type=potential_type,
shape=PotentialShape.Circular,
X=X, mean_x=0, var_x=var_x,
Y=Y, mean_y=0, var_y=var_y,
xi=xi, rho_0=rho)
#potential_field += potential_basis
potential_field = np.maximum(potential_field,potential_basis)
potential_field = np.where(camera_shape > 0, potential_field, 0)
potential_field = np.where(camera_shape < field_depth, potential_field, 0)
return X_potential_field, Y_potential_field, np.minimum(potential_field,1.2)/n_target
def convert_target_list_to_potential_field_input(target_list):
input_list = []
for target in target_list:
input_list.append((target.xc, target.yc, constants.COEFF_RADIUS * target.radius))
return input_list
def plot_potential_field_dynamic(Xp, Yp, potential_field):
import src.plot as plot
if plot.PLOT_VARIATION_ON_REGION:
surf = plot.ax1.plot_surface(Xp, Yp, potential_field, cmap="hot",
linewidth=1, antialiased=True)
plt.draw()
plt.pause(1e-17)
plt.cla()
def plot_xi_rho():
x = np.linspace(0,2.5,100)
y_all = []
rho_all = [0.5,1.0,1.5,2]
colors = ["g","r","c","b"]
for rho in rho_all:
y_all.append(define_potential_type(PotentialType.Camera_potential_quadratic, x, xi=1, eta=None, rho_0=rho))
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(1, 1, 1)
ax1.xaxis.set_tick_params(labelsize=20)
ax1.yaxis.set_tick_params(labelsize=20)
ax1.set_title("Basis functions profil in terms of rho", fontsize=25, fontweight='bold')
for color,y in zip(colors,y_all):
ax1.plot(x,y,c=color)
y_all = []
for rho in rho_all:
y_all.append(define_potential_type(PotentialType.Camera_potential_steps, x, xi=1, eta=None, rho_0=rho))
for color, y in zip(colors, y_all):
ax1.plot(x, y, '--',c=color ,linewidth = 2)
ax1.legend(["rho = 0.5[m]","rho = 1.0[m]","rho = 1.5[m]","rho = 2.0[m]"],loc=1, fontsize=25)
plt.show()
def plot_potential_field(Xp, Yp, potential_field):
# Plot the surface.
fig = plt.figure(figsize=(24, 16))
ax1 = fig.add_subplot(1, 1, 1, projection='3d')
ax1.xaxis.set_tick_params(labelsize=20)
ax1.yaxis.set_tick_params(labelsize=20)
ax1.zaxis.set_tick_params(labelsize=20)
'''
ax1.plot_wireframe(Xp, Yp, potential_field, rstride=1 ,cstride=1)
X = np.ravel(Xp)
Y = np.ravel(Yp)
Z = np.ravel(potential_field)
sc1 = ax1.plot_trisurf(X, Y, Z, linewidth=0.5, antialiased=True)
cb = fig.colorbar(sc1, ax=ax1)
cb.ax.yaxis.set_tick_params(labelsize=20)
'''
#for angle in range(0, 360):
ax1.view_init(60,- 45)
n = 2
lev = np.array([0,0.2,0.4,0.6,0.8,1])/n
ax1.plot_surface(Xp, Yp, potential_field, rstride=1, cstride=1, alpha=0.8,cmap="hot")
cset = ax1.contour(Xp, Yp, potential_field,levels=lev, zdir='z', offset=1/n+0.05, cmap="hot")
#cset = ax1.contour(Xp, Yp,potential_field,levels= [3,4,5], zdir='x', offset= 0,colors=["red","black","red"])
#cset = ax1.contour(Xp, Yp, potential_field,levels=[-2,0,2], zdir='y', offset=5,colors=["red","black","red"])
plt.show()
def plot_potential_field_and_grad(Xp, Yp, Xf, Yf, potential_field, force_x, force_y,objectives_list,obstacle_list):
# Plot the surface.
fig = plt.figure(figsize=(16, 8))
#ax0 = fig.add_subplot(0, 0, 1)
ax1 = fig.add_subplot(1,2,1, projection='3d')
ax2 = fig.add_subplot(1,2,2)
    M = np.arctan2(force_x, force_y)
# This file is the same as ICP07 with the exception that it doesn't interpolate the
# first array.
import numpy as np
from convertScanToXY import convertScanToXY
from scipy.spatial.distance import pdist,cdist
import operator
import pdb
import math
import time
# def distance(x1,y1,x2,y2):
# dist=math.sqrt(((float(x1)-float(x2))*(float(x1)-float(x2)))+((float(y1)-float(y2))*(float(y1)-float(y2))))
# return dist
def closestPoints(old,new):
distances = cdist(old,new,'euclidean')
# distances is a matrix where rows correspond to points in the 'old' matrix and
# cols correspond to points in the 'new' matrix.
indices = np.argmin(distances, axis = 0)
closest = old[indices]
return closest
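# A minimal usage sketch (illustrative addition): one matched row from `old`
# is returned for every point in `new`.
def _demo_closest_points():
    old = np.array([[0.0, 0.0], [10.0, 10.0]])
    new = np.array([[1.0, 1.0], [9.0, 9.0], [11.0, 10.0]])
    matches = closestPoints(old, new)
    assert np.array_equal(matches, np.array([[0.0, 0.0], [10.0, 10.0], [10.0, 10.0]]))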
def doOneIteration(XY1,XY2):
    # Find the closest point in XY1 for each point in XY2
closestPts=closestPoints(XY1,XY2)
#pdb.set_trace()
# Find the covariance between the 2 matrices
cov=np.matmul((np.transpose(XY2)),closestPts)
# Use that to find the rotation
U, s, V = np.linalg.svd(cov, full_matrices=True)
rotationMatrix = np.matmul(V,U.T)
# Find the optimal translation
XY2 = np.matmul(XY2, rotationMatrix.T)
# Find their average translation from their corresponding closest points
diffs=XY2-closestPts
offset=np.mean(diffs,axis=0)
translation=-offset
    # Calculate the alignment error as the magnitude of the mean offset between point matches
err=math.sqrt((offset[0])*(offset[0])+(offset[1])*(offset[1]))
return rotationMatrix, translation, err
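# A minimal sketch (illustrative addition, not part of the original file): an
# iterate-until-converged loop around doOneIteration that accumulates the rigid
# transform. max_iters and tol are arbitrary illustrative values.
def _demo_icp_loop(XY1, XY2, max_iters=30, tol=1e-4):
    total_R = np.eye(2)
    total_t = np.zeros(2)
    for _ in range(max_iters):
        R, t, err = doOneIteration(XY1, XY2)
        XY2 = np.matmul(XY2, R.T) + t          # apply this iteration's transform
        total_R = np.matmul(R, total_R)        # compose rotations
        total_t = np.matmul(R, total_t) + t    # compose translations
        if err < tol:
            break
    return total_R, total_t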
def ICP08(XY1, XY2, transformationSeed):
maxDist = 50
# #start_time = time.time()
# #########################################
# # Interpolate the first scan as necessary
# #########################################
# # This is approximating point to plane ICP but instead of projecting onto the
# # normal, I am just creating more points in areas where things are sparse.
# numberOfPoints = XY1.shape[0]
# C = np.zeros([2,2]) # this is necessary in Python to use pdist
# for I in range(numberOfPoints - 1):
# A = XY1[I,:]
# B = XY1[I+1,:]
# C[0,:] = A # and is used to find the distance betwen them
# C[1,:] = B
# distance = pdist(C)
# #print 'distance: ', distance
# if distance > maxDist:
# XY1 = interpolatePoints(A,B, distance, maxDist, XY1)
# # Check the first and last points as a special case
# A = XY1[numberOfPoints-1,:]
# B = XY1[0,:]
# C[0,:] = A
# C[1,:] = B
# distance = pdist(C)
# if distance > maxDist:
# XY1 = interpolatePoints(A,B, distance, maxDist, XY1)
# #elapsed_time = time.time() - start_time
# print 'Stop point just before ICP'
# pdb.set_trace()
# ###############
# Do the ICP part
# ###############
translation, rotationMatrix = actualICP(XY1, XY2, transformationSeed)
# rotate and translate the XY2 points
    XY2temp = np.matmul(XY2,rotationMatrix.T) + translation
    return XY2temp, rotationMatrix, translation
'''
Group enabled ANPNetwork class and supporting classes.
'''
from pyanp.pairwise import Pairwise
from pyanp.prioritizer import Prioritizer, PriorityType
from pyanp.general import islist, unwrap_list, get_matrix, matrix_as_df
from typing import Union
import pandas as pd
from copy import deepcopy
from pyanp.limitmatrix import normalize, calculus, priority_from_limit
import numpy as np
import re
from pyanp.rating import Rating
class ANPNode:
'''
    A node inside a cluster, inside a network. The basic building block of
    an ANP network.
:param network: An ANPNetwork object that this node lives inside.
:param cluster: An ANPCluster object that this node lives inside.
:param name: The name of this node.
'''
def __init__(self, network, cluster, name:str):
self.name = name
self.cluster = cluster
self.network = network
self.node_prioritizers = {}
self.subnetwork = None
self.invert = False
def is_node_cluster_connection(self, dest_cluster:str)->bool:
'''
Is this node connected to a cluster.
:param dest_cluster: The name of the cluster
:return: True/False
'''
if dest_cluster in self.node_prioritizers:
return True
else:
return False
def node_connect(self, dest_node)->None:
        '''
Make a node connection from this node to dest_node
:param dest_node: The destination node as a str, int, or ANPNode. It
            can be a list of nodes, and then we will connect each node from
this node. The dest_node should be in any format accepted by
ANPNetwork._get_node()
'''
if islist(dest_node):
for dn in dest_node:
self.node_connect(dn)
else:
prioritizer = self.get_node_prioritizer(dest_node, create=True)
prioritizer.add_alt(dest_node, ignore_existing=True)
#Make sure parent clusters are connected
src_cluster = self.cluster
dest_cluster = self.network._get_node_cluster(dest_node)
src_cluster.cluster_connect(dest_cluster)
def get_node_prioritizer(self, dest_node, create=False,
create_class=Pairwise, dest_is_cluster=False)->Prioritizer:
'''
Gets the node prioritizer for the other_node
:param dest_node: The node as a int, str, or ANPNode object.
:return: The prioritizer if it exists, or None
'''
if dest_is_cluster:
dest_cluster = self.network.cluster_obj(dest_node)
dest_name = dest_cluster.name
else:
dest_cluster = self.network._get_node_cluster(dest_node)
dest_name = dest_cluster.name
if dest_name not in self.node_prioritizers:
if create:
prioritizer = create_class()
self.node_prioritizers[dest_name] = prioritizer
return prioritizer
else:
return None
else:
return self.node_prioritizers[dest_name]
def is_node_node_connection(self, dest_node)->bool:
'''
Checks if there is a node connection from this node to dest_node
:param dest_node: The node as a int, str, or ANPNode object.
:return:
'''
pri = self.get_node_prioritizer(dest_node)
if pri is None:
return False
elif not pri.is_alt(dest_node):
return False
else:
return True
def get_unscaled_column(self, username=None)->pd.Series:
'''
Returns the column in the unscaled supermatrix for this node.
:param username: The user/users to do this for. Typical Prioritizer
calculation usage, i.e. None means do for all group average.
:return: A pandas series indexed by the node names.
'''
nnodes = self.network.nnodes()
rval = pd.Series(data=[0.0]*nnodes, index=self.network.node_names())
prioritizer:Prioritizer
for prioritizer in self.node_prioritizers.values():
vals = prioritizer.priority(username, PriorityType.NORMALIZE)
            for alt, val in vals.items():
rval[alt] = val
return rval
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
pri:Prioritizer
for pri in self.node_prioritizers.values():
pri.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def set_node_prioritizer_type(self, destNode, prioritizer_class):
'''
Sets the node prioritizer type
:param destNode: An ANPNode object, string, or integer location
:param prioritizer_class: The new type
:return: None
'''
pri = self.get_node_prioritizer(destNode, create_class=prioritizer_class)
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
dest_cluster = self.network._get_node_cluster(destNode)
dest_name = dest_cluster.name
self.node_prioritizers[dest_name] = rval
else:
pass
class ANPCluster:
'''
A cluster in an ANP object
    :param network: The ANPNetwork object this cluster is in.
:param name: The name of the cluster to create.
'''
def __init__(self, network, name:str):
self.prioritizer = Pairwise()
self.name = name
self.network = network
# The list of ANP nodes in this cluster
self.nodes = {}
def add_node(self, *nodes)->None:
"""
Adds one or more nodes
:param nodes: A vararg list of node names to add to this cluster.
The names should all be strings.
        :return: Nothing
"""
nodes = unwrap_list(nodes)
if islist(nodes):
for node in nodes:
if isinstance(node, str):
self.add_node(node)
else:
self.nodes[nodes] = ANPNode(self.network, self, nodes)
def nnodes(self)->int:
"""
:return: The number of nodes in this cluster.
"""
return len(self.nodes)
def is_node(self, node_name:str)->bool:
'''
Does a node by that name exist in this cluster
:param node_name: The name of the node to look for
:return: True/False
'''
return node_name in self.nodes
def node_obj(self, node_name):
"""
Get a node in this cluster.
:param node_name: The node as either a string name, integer position, or
simply the ANPObject, in which case there is nothing to do except
return it.
:return: ANPNode object. If it wasn't found, None is returned.
"""
if isinstance(node_name, ANPNode):
return node_name
else:
return get_item(self.nodes, node_name)
def node_names(self)->list:
'''
:return: List of the string names of the nodes in this cluster
'''
return list(self.nodes.keys())
def node_objs(self)->list:
'''
:return: List of the ANPNode objects in this cluster.
'''
return self.nodes.values()
def cluster_connect(self, dest_cluster)->None:
"""
Make a cluster->cluster connection from this node to the destination.
:param dest_cluster: Either the ANPCluster object to connect to, or
the name of the destination cluster.
:return:
"""
if isinstance(dest_cluster, ANPCluster):
dest_cluster_name = dest_cluster.name
else:
dest_cluster_name = dest_cluster
self.prioritizer.add_alt(dest_cluster_name, ignore_existing=True)
def set_prioritizer_type(self, prioritizer_class)->None:
'''
Sets the cluster prioritizer type
:param prioritizer_class: The new type
:return: None
'''
pri = self.prioritizer
if not isinstance(pri, prioritizer_class):
#Wrong type, get alts from this one, and create correct one
rval = prioritizer_class()
rval.add_alt(pri.alt_names())
self.prioritizer = rval
else:
pass
def data_names(self, append_to=None):
'''
Used when exporting an Excel header for a network, for its data.
:param append_to: If not None, append header strings to this list.
Otherwise we create a new list to append to.
:return: List of strings of comparison name headers. If append_to is not
None, we return append_to with the new string headers appended.
'''
if append_to is None:
append_to = []
if self.prioritizer is not None:
self.prioritizer.data_names(append_to, post_pend="wrt "+self.name)
return append_to
def get_item(tbl:dict, key):
"""
    Looks up an item in a dictionary by key first. If the key is not present
    but is an integer, it is treated as a position and the item at that
    position (in insertion order) is returned.
:param tbl: The dictionary to look in
:param key: The key, or integer position to get the item of
:return: The item, or it not found, None
"""
if key in tbl:
return tbl[key]
elif not isinstance(key, int):
return None
# We have an integer key by this point
if key < 0:
return None
elif key >= len(tbl):
return None
else:
count = 0
for rval in tbl.values():
if count == key:
return rval
count+=1
#Should never make it here
raise ValueError("Shouldn't happen in anp.get_item")
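# A minimal usage sketch (illustrative addition): lookup by key first, then by
# position. Positional lookup relies on dict insertion order (Python 3.7+).
def _demo_get_item():
    tbl = {'alpha': 1, 'beta': 2}
    assert get_item(tbl, 'beta') == 2       # name lookup
    assert get_item(tbl, 0) == 1            # positional lookup
    assert get_item(tbl, 'gamma') is None   # missing key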
__CLEAN_SPACES_RE = re.compile('\\s+')
def clean_name(name:str)->str:
"""
Cleans up a string for usage by:
    1. stripping off beginning and ending spaces
2. All spaces convert to one space
3. \t and \n are treated like a space
:param name: The string name to be cleaned
:return: The cleaned name.
"""
rval = name.strip()
return __CLEAN_SPACES_RE.sub(string=rval, repl=' ')
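# A minimal usage sketch (illustrative addition): internal runs of whitespace,
# including tabs and newlines, collapse to a single space.
def _demo_clean_name():
    assert clean_name('  Goal \t node\n one ') == 'Goal node one'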
def sum_subnetwork_formula(priorities:pd.Series, dict_of_series:dict):
"""
A function that takes the weighted sum of values. Used for synthesis.
:param priorities: Series whose index are the nodes with subnetworks and
values are their weights.
:param dict_of_series: A dictionary whose keys are the same as the keys of
priorities, i.e. the nodes with subnetworks. The values are Series
whose keys are alternative names and values are the synthesized
alternative scores under that subnetwork.
    :return: Series of synthesized alternative scores: the priority-weighted
        average over the subnetworks containing each alternative.
"""
subpriorities = priorities[dict_of_series.keys()]
if sum(subpriorities) != 0:
subpriorities /= sum(subpriorities)
    rval = pd.Series(dtype=float)
    counts = pd.Series(dtype=float)  # priorities are fractional weights
for subnet_name, vals in dict_of_series.items():
priority = subpriorities[subnet_name]
        for alt_name, val in vals.items():
if alt_name in rval:
rval[alt_name] += val * priority
counts[alt_name] += priority
else:
                rval[alt_name] = val * priority  # weight the first occurrence too
counts[alt_name] = priority
# Now let's calculate the averages
    for alt_name, val in rval.items():
if counts[alt_name] > 0:
rval[alt_name] /= counts[alt_name]
return rval
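# Hedged worked example (not part of the original module; relies on the
# priority-weighted first occurrence above): two subnetworks with weights
# 0.75/0.25 scoring two alternatives yield the weighted average per alternative.
def _example_sum_subnetwork_formula():
    priorities = pd.Series({"n1": 0.75, "n2": 0.25})
    scores = {"n1": pd.Series({"alt1": 1.0, "alt2": 0.0}),
              "n2": pd.Series({"alt1": 0.0, "alt2": 1.0})}
    rval = sum_subnetwork_formula(priorities, scores)
    assert abs(rval["alt1"] - 0.75) < 1e-12 and abs(rval["alt2"] - 0.25) < 1e-12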
class ANPNetwork(Prioritizer):
'''
Represents an ANP prioritizer. Has clusters/nodes, comparisons, etc.
:param create_alts_cluster: If True (which is the default) we start with a
cluster that is the alternatives cluster. Otherwise the model starts
empty.
'''
def __init__(self, create_alts_cluster=True):
self.clusters = {}
if create_alts_cluster:
cl = self.add_cluster("Alternatives")
self.alts_cluster = cl
self.users=[]
self.limitcalc = calculus
self.subnet_formula = sum_subnetwork_formula
self.default_priority_type = None
def add_cluster(self, *args)->ANPCluster:
'''
Adds one or more clusters to a network
:param args: Can be either a single string, or a list of strings
:return: ANPCluster object or list of ANPCluster objects
'''
clusters = unwrap_list(args)
if islist(clusters):
rval = []
for cl in clusters:
rval.append(self.add_cluster(cl))
return rval
else:
#Adding a single cluster
cl = ANPCluster(self, clusters)
self.clusters[clusters] = cl
return cl
def cluster_names(self)->list:
'''
:return: List of string names of the clusters
'''
return list(self.clusters.keys())
def nclusters(self)->int:
'''
:return: The number of clusters in the network.
'''
return len(self.clusters)
def cluster_obj(self, cluster_info:Union[ANPCluster, str])->ANPCluster:
'''
Returns the cluster with given information
:param cluster_info: Either the name of the cluster object to get
or the cluster object, or its int position
:return: The ANPCluster object
'''
if isinstance(cluster_info, ANPCluster):
return cluster_info
else:
return get_item(self.clusters, cluster_info)
def add_node(self, cl, *nodes):
'''
Adds nodes to a cluster
:param cl: The cluster name or object
:param nodes: The name or names of the nodes
:return: Nothing
'''
cluster = self.cluster_obj(cl)
cluster.add_node(nodes)
def nnodes(self, cluster=None)->int:
"""
Returns the number of nodes in the network, or a cluster.
:param cluster: If None, we return the number of nodes in the network.
Otherwise this is the integer position, string name, or ANPCluster
object of the cluster to get the node count within.
:return: The count.
"""
if cluster is None:
            rval = pd.Series(dtype=int)
for cname, cluster in self.clusters.items():
rval[cname] = cluster.nnodes()
return sum(rval)
else:
clobj = self.cluster_obj(cluster)
return clobj.nnodes()
def add_alt(self, alt_name:str):
"""
Adds an alternative to the model:
        1. Adds the alternative to alts_cluster if not None
2. For each node with a subnetwork, we add the alternative to that subnetwork.
:param alt_name: The name of the alternative to add
:return: Nothing
"""
if self.alts_cluster is not None:
self.add_node(self.alts_cluster, alt_name)
# We should add this alternative to each subnetwork
for node in self.node_objs_with_subnet():
node.subnetwork.add_alt(alt_name)
def is_user(self, uname)->bool:
'''
Checks if a user exists
:param uname: The name of the user to check for
:return: bool
'''
return uname in self.users
def is_alt(self, altname)->bool:
'''
Checks if an alternative exists
        :param altname: The alternative name to look for
:return: bool
'''
return self.alts_cluster.is_node(altname)
def add_user(self, uname, ignore_dupe=False):
'''
        Adds a user (or list of users) to the system
        :param uname: The name of the new user, or a list of names
        :param ignore_dupe: If True, silently ignore names that already exist
        :return: Nothing
        :raise ValueError: If the user already exists and ignore_dupe is False
'''
if islist(uname):
for un in uname:
self.add_user(un, ignore_dupe=ignore_dupe)
return
if self.is_user(uname):
if not ignore_dupe:
raise ValueError("User by the name "+uname+" already existed")
else:
return
self.users.append(uname)
def nusers(self)->int:
'''
:return: The number of users
'''
return len(self.users)
def user_names(self)->list:
'''
:return: List of names of the users
'''
return deepcopy(self.users)
def node_obj(self, node_name)->ANPNode:
'''
Gets the ANPNode object of the node with the given name
        :param node_name: The name of the node to get, or its overall integer
position, or the ANPNode object itself
:return: The ANPNode if it exists, or None
'''
if isinstance(node_name, ANPNode):
return node_name
elif isinstance(node_name, int):
#Reference by integer
node_pos = node_name
node_count = 0
            for cluster in self.clusters.values():
                rel_pos = node_pos - node_count
                if rel_pos < cluster.nnodes():
                    return cluster.node_obj(rel_pos)
                node_count += cluster.nnodes()  # advance past this cluster's nodes
#If we make it here, we were out of bounds
return None
#Okay handle string node name
cluster: ANPCluster
for cname, cluster in self.clusters.items():
rval = cluster.node_obj(node_name)
if rval is not None:
return rval
#Made it here, the node didn't exist
return None
def _get_node_cluster(self, node)->ANPCluster:
'''
Gets the ANPCluster object a node lives in
:param node: The name/integer positions, or ANPNode object itself. See
node_obj() method for more details.
:return: The ANPCluster object this node lives in, or None if it doesn't
exist.
'''
n = self.node_obj(node)
if n is None:
# Could not find the node
return None
return n.cluster
def node_connect(self, src_node, dest_node):
'''
        Connects two nodes
        :param src_node: Source node in any format accepted by the node_obj() function
        :param dest_node: Destination node in any format accepted by the node_obj() function
:return: Nothing
'''
src = self.node_obj(src_node)
src.node_connect(dest_node)
def node_names(self, cluster=None)->list:
'''
Returns a list of nodes in this network, organized by cluster
        :param cluster: If None, we get all nodes in the network; else we get nodes
            in that cluster, given in any format accepted by the cluster_obj() function.
:return: List of strs of node names
'''
if cluster is not None:
cl = self.cluster_obj(cluster)
return cl.node_names()
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_names()
for name in cnodes:
rval.append(name)
return rval
def node_objs(self)->list:
'''
        Returns a list of the ANPNode objects in this network, organized by cluster
        :return: List of ANPNode objects
'''
rval = []
cl:ANPCluster
for cl in self.clusters.values():
cnodes = cl.node_objs()
for name in cnodes:
rval.append(name)
return rval
def cluster_objs(self)->list:
"""
:return: List of ANPCluster objects in the network
"""
return list(self.clusters.values())
def node_connections(self)->np.ndarray:
"""
        Returns the node connection matrix for this network.
        :return: A numpy array of shape [nnodes, nnodes] where item [row, col]
            is 1 if there is a node connection from col -> row, and 0 if there is
            no connection.
"""
nnodes = self.nnodes()
nnames = self.node_names()
rval = | np.zeros([nnodes, nnodes]) | numpy.zeros |
#!/usr/bin/env python3
from shared_setting import *
import sys
import csv
import numpy
threshold=5.0
def sigmoid(x):
return 1.0/(1.0+numpy.exp(-(x-threshold)))
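# Hedged illustration (not part of the original script): the activation equals
# 0.5 exactly at the firing threshold and saturates towards 0/1 away from it.
def _example_sigmoid():
    assert abs(sigmoid(threshold) - 0.5) < 1e-12
    assert sigmoid(threshold + 10.0) > 0.99
    assert sigmoid(threshold - 10.0) < 0.01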
#parameters
save_pitch=10.0 #ms
save_pitch_weight=60.0*1000.0 #ms
etaEsom=1.0
etaEdnd=1.0
#etaIdnd=1.0e-2*Ndndinh
#thetainh=0.5
#alpha=0.9
#beta_som=2.5
#beta_dnd=2.5
#gamma=1.0
phi=100.0/1000.0 #kHz
tauL=10.0 #ms
taudeltaW=1.0*1000.0 #ms
tauM=60.0*1000.0 #ms
Wmin=0.0
r0=0.05
c0=70.0
eta_Wdecay=1e-7
tauSTD=500.0 #ms
tauSTF=200.0 #ms
USTF=[0.5, 0.03] #[no Ach, Ach]
#connections
WEEsom=numpy.loadtxt("WEEsom_init.csv", delimiter=",")
deltaWEEsom=numpy.zeros_like(WEEsom)
WEEdnd=numpy.loadtxt("WEEdnd_init.csv", delimiter=",")
deltaWEEdnd=numpy.zeros_like(WEEdnd)
WEIsom=numpy.loadtxt("WEIsom_init.csv", delimiter=",")
WEIdnd=numpy.loadtxt("WEIdnd_init.csv", delimiter=",")
#deltaWEIdnd=numpy.zeros_like(WEIdnd)
WIEsom=numpy.loadtxt("WIEsom_init.csv", delimiter=",")
WIEdnd=numpy.loadtxt("WIEdnd_init.csv", delimiter=",")
#variables
x=numpy.zeros(NE)
y=numpy.zeros(NE)
Ex=r0*numpy.ones(NE)
Ey=r0*numpy.ones(NE)
STDrecur=numpy.ones(NE)
STFrecur=USTF[0]*numpy.ones(NE)
PSCrecur=numpy.zeros(NE)
Iext_som=numpy.zeros(NE)
Iext_dnd=numpy.zeros(NE)
Isominh=numpy.zeros(Nsominh)
#Idndinh=numpy.zeros(Ndndinh)
Iinput=numpy.zeros(Ninput)
STDinput=numpy.ones(Ninput)
STFinput=USTF[0]*numpy.ones(Ninput)
PSCinput= | numpy.zeros(Ninput) | numpy.zeros |
""" Discrete Lehman Representation (DLR) implementation
using Numpy and Scipy."""
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing
permissions and limitations under the License."""
import numpy as np
import numpy.polynomial.legendre as leg
from scipy.linalg import eigh as scipy_eigh
from scipy.linalg import lu_solve, lu_factor
from .kernel import kernel, KernelInterpolativeDecoposition
class dlr(object):
"""
Discrete Lehmann Representation (DLR) class.
    Provides the DLR basis, transforms, and algorithms.
Parameters
----------
lamb : float
DLR scale parameter :math:`\\Lambda`.
eps : float
Set accuracy of the DLR representation.
    xi : int, optional
Statistical sign :math:`\\xi = \\pm 1` for bosons and fermions respectively.
max_rank : int, optional
Maximum rank of the DLR kernel decomposition. Default 500.
nmax : int, optional
        Maximum index of the Matsubara frequency grid. Default int(lamb).
verbose : bool, optional
Default `False`.
python_impl : bool, optional
Switch between the python and fortran library driver. Default `True`.
"""
def __init__(self, lamb, eps=1e-15, xi=-1,
max_rank=500, nmax=None, verbose=False, python_impl=True):
self.xi = xi
self.lamb = lamb
self.eps = eps
if not python_impl:
from .kernel_fortran import KernelInterpolativeDecopositionFortran
KID = KernelInterpolativeDecopositionFortran
else:
KID = KernelInterpolativeDecoposition
kid = KID(lamb, eps=eps, xi=xi, max_rank=max_rank, nmax=nmax, verbose=verbose)
members = [
'rank', 'dlrit', 'dlrrf', 'dlrmf',
'dlrit2cf', 'it2cfpiv', 'dlrmf2cf', 'mf2cfpiv', 'T_lx', 'T_qx',
]
for member in members: setattr(self, member, getattr(kid, member))
del kid
# -- Split real-frequency nodes (assuming sorted)
self.pm_idx = np.argwhere(self.dlrrf > 0)[0,0]
self.dlrrf_p = self.dlrrf[self.pm_idx:]
self.dlrrf_m = self.dlrrf[:self.pm_idx]
self.kernel_nominator_p = 1 / (1 + np.exp(-self.dlrrf_p))
self.kernel_nominator_m = 1 / (1 + np.exp(+self.dlrrf_m))
        # -- Auxiliary variables
tau_l = self.get_tau(1.)
w_x = self.dlrrf
I = np.eye(len(w_x))
self.W_xx = 1. / (I + w_x[:, None] - w_x[None, :]) - I
self.k1_x = -np.squeeze(kernel(np.ones(1), w_x))
self.TtT_xx = self.dlr_from_tau(tau_l[:, None] * self.T_lx)
self.__bosonic_corr_freq = lambda omega: np.tanh(0.5 * omega)
self.bosonic_corr_x = self.__bosonic_corr_freq(self.dlrrf)
self.W_bc_xx = self.W_xx * self.bosonic_corr_x[:, None]
self.TtT_bc_xx = self.TtT_xx * self.bosonic_corr_x[None, :]
def __len__(self):
"""The length of the DLR expansion
Returns
-------
rank : int
Number of DLR expansion coefficients.
"""
return self.rank
def __xi_arg(self, xi):
"""Internal helper function to filter xi arguments."""
if xi is None: xi = self.xi
assert( np.abs(xi) == 1 and type(xi) == int )
return xi
def get_dlr_frequencies(self):
"""Get real frequency DLR grid :math:`\\omega_x \\in [-\\Lambda, \\Lambda]`
Returns
-------
w_x : (n), ndarray
Scaled real frequency points :math:`\\omega_x`
for the DLR representation.
"""
return self.dlrrf
# -- Imaginary time
def get_tau_over_beta(self):
"""Get grid in scaled imaginary time :math:`\\tau/\\beta \\in [0, 1]`
Returns
-------
ttau_l : (n), ndarray
Scaled imaginary time points :math:`\\tilde{\\tau}_l = \\tau_l / \\beta`
for the DLR representation, :math:`\\tilde{\\tau}_l \\in [0, 1]`.
"""
ttau_l = self.dlrit
return ttau_l
def get_tau(self, beta):
"""Get :math:`\\tau`-grid in imaginary time :math:`\\tau \\in [0, \\beta]`.
Parameters
----------
beta : float
Inverse temperature :math:`\\beta`
Returns
-------
tau_l : (n), ndarray
Imaginary time points :math:`\\tau_l` for the DLR representation, :math:`\\tau_l \\in [0, \\beta]`.
"""
tau_l = self.get_tau_over_beta() * beta
return tau_l
def dlr_from_tau(self, G_laa):
"""Transform the rank-3 array_like Green's function `G_laa` from imaginary time to DLR space.
Parameters
----------
G_laa : (n,m,m), array_like
Green's function in imaginary time with :math:`m \\times m` orbital indices.
Returns
-------
G_xaa : (n,m,m), ndarray
            Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
"""
G_xaa = lu_solve((self.dlrit2cf, self.it2cfpiv), G_laa)
return G_xaa
def tau_from_dlr(self, G_xaa):
"""Transform the rank-3 array_like Green's function `G_xaa` from DLR space to imaginary time.
Parameters
----------
G_xaa : (n,m,m), array_like
Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
Returns
-------
G_laa : (n,m,m), ndarray
Green's function in imaginary time with :math:`m \\times m` orbital indices.
"""
G_laa = np.tensordot(self.T_lx, G_xaa, axes=(1, 0))
return G_laa
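    # Hedged usage sketch (added for illustration; not part of the original
    # API): on the DLR imaginary-time nodes, dlr_from_tau and tau_from_dlr
    # are mutual inverses up to the requested accuracy eps.
    def _example_tau_roundtrip(self):
        beta = 1.0
        g_laa = self.free_greens_function_tau(np.array([[0.5]]), beta)
        g_xaa = self.dlr_from_tau(g_laa)
        assert np.allclose(self.tau_from_dlr(g_xaa), g_laa)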
def tau_from_legendre(self, G_naa):
"""Transform the rank-3 array_like Green's function `G_naa` from Legendre coefficient space to imaginary time.
Parameters
----------
G_naa : (n,m,m), array_like
Green's function in Legendre coefficient space with :math:`m \\times m` orbital indices.
Returns
-------
G_laa : (n,m,m), ndarray
Green's function in imaginary time with :math:`m \\times m` orbital indices.
"""
x = 2 * self.get_tau_over_beta() - 1
G_laa = np.rollaxis(leg.legval(x, G_naa), -1)
return G_laa
def eval_dlr_tau(self, G_xaa, tau_k, beta):
"""Evaluate the DLR coefficient Green's function `G_xaa` at arbibrary points in imaginary time.
Parameters
----------
G_xaa : (n,m,m), array_like
Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
tau_k : (k), array_like
Imaginary time points :math:`\\tau_k` where to evaluate the Green's function.
beta : float
Inverse temperature :math:`\\beta`
Returns
-------
G_kaa : (k,m,m), ndarray
Green's function at the imaginary time points :math:`\\tau_k` with :math:`m \\times m` orbital indices.
"""
tau_k = tau_k[:, None] / beta
w_p = self.dlrrf_p[None, :]
K_kp = np.exp(-tau_k*w_p) * self.kernel_nominator_p[None, :]
G_kaa = np.einsum('kp,p...->k...', K_kp, G_xaa[self.pm_idx:])
w_m = self.dlrrf_m[None, :]
K_km = np.exp((1 - tau_k)*w_m) * self.kernel_nominator_m[None, :]
G_kaa += np.einsum('km,m...->k...', K_km, G_xaa[:self.pm_idx])
return G_kaa
def lstsq_dlr_from_tau(self, tau_i, G_iaa, beta):
"""Return DLR coefficients by least squares fit to values on arbitrary imaginary time grid.
Parameters
----------
tau_i : (i), array_like
Imaginary time points :math:`\\tau_i` where the Green's function is sampled.
G_iaa : (i,m,m), array_like
Green's function in imaginary time space :math:`G(\\tau_i)` with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
Returns
-------
G_xaa : (k,m,m), ndarray
Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
"""
shape_iaa = G_iaa.shape
assert(len(shape_iaa) == 3)
shape_iA = (shape_iaa[0], shape_iaa[1]*shape_iaa[2])
shape_xaa = (len(self), shape_iaa[1], shape_iaa[2])
K_ix = kernel(tau_i/beta, self.dlrrf)
G_xaa = np.linalg.lstsq(
K_ix, G_iaa.reshape(shape_iA),
rcond=None)[0].reshape(shape_xaa)
return G_xaa
# -- Matsubara Frequency
def get_matsubara_frequencies(self, beta):
"""Get Matsubara frequency grid.
Parameters
----------
beta : float
Inverse temperature :math:`\\beta`
Returns
-------
w_q : (n), ndarray
Matsubara frequency points :math:`i\\omega_q` for the DLR representation.
"""
zeta = (1 - self.xi)/2
w_q = 1.j * np.pi/beta * (2*self.dlrmf + zeta)
return w_q
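    # Hedged illustration (added; not original API): for fermions (xi = -1,
    # the default) the grid consists of odd Matsubara frequencies
    # i*pi*(2n + 1)/beta at the DLR-selected indices n.
    def _example_matsubara_grid(self):
        beta = 2.0
        w_q = self.get_matsubara_frequencies(beta)
        if self.xi == -1:
            assert np.allclose(w_q.imag, np.pi / beta * (2 * self.dlrmf + 1))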
def dlr_from_matsubara(self, G_qaa, beta, xi=None):
"""Transform the rank-3 array_like Green's function `G_qaa` from Matsbuara frequency to DLR space.
Parameters
----------
G_qaa : (n,m,m), array_like
Green's function in Matsubara frequency with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
G_xaa : (n,m,m), ndarray
            Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
"""
xi = self.__xi_arg(xi)
G_xaa = lu_solve((self.dlrmf2cf, self.mf2cfpiv), G_qaa.conj() / beta)
if xi == 1: G_xaa /= self.bosonic_corr_x[:, None, None]
return G_xaa
def matsubara_from_dlr(self, G_xaa, beta, xi=None):
"""Transform the rank-3 array_like Green's function `G_xaa` from DLR space to Matsbuara frequency.
Parameters
----------
G_xaa : (n,m,m), array_like
            Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
G_qaa : (n,m,m), ndarray
Green's function in Matsubara frequency with :math:`m \\times m` orbital indices.
"""
xi = self.__xi_arg(xi)
if xi == 1:
G_qaa = beta * np.tensordot(
self.T_qx * self.bosonic_corr_x[None, :], G_xaa, axes=(1, 0))
else:
G_qaa = beta * np.tensordot(self.T_qx, G_xaa, axes=(1, 0))
if len(G_qaa.shape) == 3: G_qaa = np.transpose(G_qaa, axes=(0, 2, 1))
G_qaa = G_qaa.conj()
return G_qaa
def eval_dlr_freq(self, G_xaa, z, beta, xi=None):
"""Evaluate the DLR coefficient Green's function `G_xaa` at arbibrary points `z` in frequency space.
Parameters
----------
G_xaa : (n,m,m), array_like
Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
z : (k), array_like
Frequency points :math:`z_k` where to evaluate the Green's function.
beta : float
Inverse temperature :math:`\\beta`
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
G_zaa : (k,m,m), ndarray
Green's function at the frequency points :math:`z_k` with :math:`m \\times m` orbital indices.
"""
xi = self.__xi_arg(xi)
w_x = self.dlrrf / beta
kernel_zx = 1./(z[:, None] + w_x[None, :])
if xi == 1: kernel_zx *= self.bosonic_corr_x[None, :]
G_zaa = np.einsum('x...,zx->z...', G_xaa, kernel_zx)
if len(G_zaa.shape) == 3: G_zaa = np.transpose(G_zaa, axes=(0, 2, 1))
G_zaa = G_zaa.conj()
return G_zaa
def lstsq_dlr_from_matsubara(self, w_q, G_qaa, beta):
"""Return DLR coefficients by least squares fit to values on arbitrary Matsubara frequency grid.
Parameters
----------
w_q : (q), array_like
Imaginary frequency points :math:`i\\omega_q` where the Green's function is sampled.
G_qaa : (q,m,m), array_like
Green's function in imaginary frequency space :math:`G(i\\omega_q)` with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
Returns
-------
G_xaa : (k,m,m), ndarray
Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
"""
shape_qaa = G_qaa.shape
assert(len(shape_qaa) == 3)
shape_qA = (shape_qaa[0], shape_qaa[1]*shape_qaa[2])
shape_xaa = (len(self), shape_qaa[1], shape_qaa[2])
K_qx = -1./(w_q[:, None] - self.dlrrf[None, :]/beta)
G_xaa = np.linalg.lstsq(
K_qx, G_qaa.reshape(shape_qA),
rcond=None)[0].reshape(shape_xaa)
return G_xaa
# -- Mathematical operations
def quadratic_hamiltonian(self, G_xaa, beta, xi=None):
""" Get quadratic Hamiltonian contribution of physical Green's function.
Parameters
----------
G_xaa : (k,m,m), ndarray
Green's function in DLR coefficient space with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
Returns
-------
H_aa : (m,m), ndarray
Quadratic Hamiltonian contribution to the Green's function.
"""
xi = self.__xi_arg(xi)
w_x = self.dlrrf
K0_x = kernel(np.array([0.]), w_x)
K1_x = kernel(np.array([1.]), w_x)
D_x = (-K0_x + xi * K1_x) * w_x / beta
H_aa = np.tensordot(D_x, G_xaa, axes=([-1, 0]))[0]
return H_aa
def convolution(self, A_xaa, B_xaa, beta, xi=None):
""" DLR convolution with :math:`\mathcal{O}(N^2)` scaling. Author: <NAME> (2021)
Imaginary time convolution
.. math:: C = A \\ast B
reformulated in DLR coefficient space. The notation :math:`C = A \\ast B` is short hand for:
.. math:: C_{ij}(\\tau) = \\sum_k \\int_{0}^\\beta d\\bar{\\tau} A_{ik}(\\tau - \\bar{\\tau}) B_{kj}(\\bar{\\tau})
Parameters
----------
A_xaa : (n,m,m)
Green's function :math:`A` in DLR coefficient space with :math:`m \\times m` orbital indices.
B_xaa : (n,m,m)
Green's function :math:`B` in DLR coefficient space with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
C_xaa : (n,m,m), ndarray
Green's function :math:`C` in DLR coefficient space with :math:`m \\times m` orbital indices,
given by the convolution :math:`C = A \\ast B`.
"""
xi = self.__xi_arg(xi)
W_xx = self.W_xx if xi == -1 else self.W_bc_xx
TtT_xx = self.TtT_xx if xi == -1 else self.TtT_bc_xx
n, na, _ = A_xaa.shape
WA_xaa = np.matmul(W_xx.T, A_xaa.reshape((n, na*na))).reshape((n, na, na))
C_xaa = np.matmul(WA_xaa, B_xaa)
del WA_xaa
WB_xaa = np.matmul(W_xx.T, B_xaa.reshape((n, na*na))).reshape((n, na, na))
C_xaa += np.matmul(A_xaa, WB_xaa)
del WB_xaa
AB_xaa = np.matmul(A_xaa, B_xaa)
C_xaa += -xi * self.k1_x[:, None, None] * AB_xaa
C_xaa += np.matmul(TtT_xx, AB_xaa.reshape((n, na*na))).reshape((n, na, na))
del AB_xaa
C_xaa *= beta
return C_xaa
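    # Hedged usage sketch (added; not original API): the imaginary-time
    # convolution of two propagators corresponds to an elementwise product in
    # Matsubara frequency, which holds here up to the DLR accuracy eps.
    def _example_convolution(self):
        beta = 1.0
        a_xaa = self.dlr_from_tau(self.free_greens_function_tau(np.array([[0.2]]), beta))
        b_xaa = self.dlr_from_tau(self.free_greens_function_tau(np.array([[-0.4]]), beta))
        c_qaa = self.matsubara_from_dlr(self.convolution(a_xaa, b_xaa, beta), beta)
        ab_qaa = self.matsubara_from_dlr(a_xaa, beta) * self.matsubara_from_dlr(b_xaa, beta)
        assert np.allclose(c_qaa, ab_qaa, atol=1e-8)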
def convolution_matrix(self, A_xaa, beta, xi=None):
""" DLR convolution matrix with :math:`\mathcal{O}(N^2)` scaling. Author: <NAME> (2021)
The imaginary time convolution matrix :math:`M` is given by
.. math:: M = [A \\ast]
i.e. the combination of the Green's function :math:`A` and the convolution operator :math:`\\ast`.
Author: <NAME> (2021)
Parameters
----------
A_xaa : (n,m,m)
Green's function :math:`A` in DLR coefficient space with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
M_xaxa : (n,m,n,m), ndarray
Convolution matrix :math:`[A \\ast]` as a rank-4 tensor in DLR and orbital space.
"""
xi = self.__xi_arg(xi)
W_xx = self.W_xx if xi == -1 else self.W_bc_xx
TtT_xx = self.TtT_xx if xi == -1 else self.TtT_bc_xx
n, na, _ = A_xaa.shape
Q_xaa = np.einsum('yx,yab->xab', W_xx, A_xaa)
Q_xaa += -xi * self.k1_x[:,None,None] * A_xaa
M_xxaa = np.einsum( 'xy,yab->yxab', W_xx, A_xaa)
M_xxaa += np.einsum('xy,yab->xyab', TtT_xx, A_xaa)
M_xxaa += np.einsum('xy,yab->xyab', np.eye(n), Q_xaa)
M_xxaa *= beta
M_xaxa = np.moveaxis(M_xxaa, 2, 1)
return M_xaxa
# -- Free Green's function solvers
def free_greens_function_dlr(self, H_aa, beta, S_aa=None, xi=None):
""" Return the free Green's function in DLR coefficent space.
The free Green's function is the solution to the Dyson differential equation
.. math:: (-\\partial_\\tau - H_{ij}) G_{jk}(\\tau) = \\delta(\\tau)
Parameters
----------
H_aa : (m,m), array_like
Single-particle Hamiltonian matrix in :math:`m \\times m` orbital space.
beta : float
Inverse temperature :math:`\\beta`
S_aa : (m,m), array_like, optional
Overlap matrix for the generalized case of non-orthogonal basis functions,
where the Dyson equation takes the form
:math:`(-S_{ij} \\partial_\\tau - H_{ij}) G_{jk}(\\tau) = \\delta(\\tau)`.
Default is `S_aa = None`.
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
G_xaa : (n,m,m), ndarray
Free Green's function :math:`G` in DLR coefficient space with :math:`m \\times m` orbital indices.
Notes
-----
The fastest algorithm for calculation of free Green's functions is the `free_greens_function_tau` method.
"""
xi = self.__xi_arg(xi)
na = H_aa.shape[0]
I_aa = np.eye(na)
if S_aa is None: S_aa = I_aa
w_x = self.dlrrf
n = len(w_x)
D_lx = self.T_lx * w_x[None, :] / beta
D_AA = np.kron(D_lx, S_aa) - np.kron(self.T_lx, H_aa)
bc_x = kernel(np.array([0.]), w_x) - xi * kernel(np.array([1.]), w_x)
D_AA[(n-1)*na:, :] = np.kron(bc_x, S_aa)
b_Aa = np.zeros((n*na, na))
b_Aa[(n-1)*na:, :] = -I_aa
g_xaa = np.linalg.solve(D_AA, b_Aa).reshape((n, na, na))
return g_xaa
def free_greens_function_tau(self, H_aa, beta, S_aa=None, xi=None):
""" Return the free Green's function in imaginary time.
The free Green's function is the solution to the Dyson differential equation
.. math:: (-\\partial_\\tau - H_{ij}) G_{jk}(\\tau) = \\delta(\\tau)
Parameters
----------
H_aa : (m,m), array_like
Single-particle Hamiltonian matrix in :math:`m \\times m` orbital space.
beta : float
Inverse temperature :math:`\\beta`
S_aa : (m,m), array_like, optional
Overlap matrix for the generalized case of non-orthogonal basis functions,
where the Dyson equation takes the form
:math:`(-S_{ij} \\partial_\\tau - H_{ij}) G_{jk}(\\tau) = \\delta(\\tau)`.
Default is `S_aa = None`.
xi : int, optional
Statistics sign, :math:`\\xi = +1` for bosons and :math:`\\xi = -1` for fermions.
Returns
-------
G_laa : (n,m,m), ndarray
Free Green's function :math:`G` in imaginary time with :math:`m \\times m` orbital indices.
"""
xi = self.__xi_arg(xi)
w_x = self.dlrrf
if S_aa is None:
E, U = np.linalg.eigh(H_aa)
else:
E, U = scipy_eigh(H_aa, S_aa)
tau_l = self.get_tau(1.)
g_lE = -kernel(tau_l, E*beta)
if xi == 1: g_lE /= self.__bosonic_corr_freq(E*beta)[None, :]
g_laa = np.einsum('lE,aE,Eb->lab', g_lE, U, U.T.conj())
return g_laa
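    # Hedged illustration (added; not original API): for a single fermionic
    # level eps the analytic free propagator is
    # G(tau) = -exp(-tau*eps) / (1 + exp(-beta*eps)), matching the kernel
    # convention used elsewhere in this module.
    def _example_free_greens_tau(self):
        beta, eps0 = 4.0, 0.3
        g_laa = self.free_greens_function_tau(np.array([[eps0]]), beta)
        tau_l = self.get_tau(beta)
        ref_l = -np.exp(-tau_l * eps0) / (1.0 + np.exp(-beta * eps0))
        if self.xi == -1:
            assert np.allclose(g_laa[:, 0, 0], ref_l)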
def free_greens_function_matsubara(self, H_aa, beta, S_aa=None):
""" Return the free Green's function in Matsubara frequency.
The free Green's function is the solution to the Dyson equation
.. math:: G_{ij}(i \\omega_n ) = \\left[ i\\omega_n - H_{ij} \\right]^{-1}
Parameters
----------
H_aa : (m,m), array_like
Single-particle Hamiltonian matrix in :math:`m \\times m` orbital space.
beta : float
Inverse temperature :math:`\\beta`
S_aa : (m,m), array_like, optional
Overlap matrix for the generalized case of non-orthogonal basis functions,
where the Dyson equation takes the form
            :math:`G_{jk}(i\\omega_n) = (i\\omega_n S_{ij} - H_{ij})^{-1}`.
Default is `S_aa = None`.
Returns
-------
G_qaa : (n,m,m), ndarray
Free Green's function :math:`G` in Matsubara frequency with :math:`m \\times m` orbital indices.
Notes
-----
The fastest algorithm for calculation of free Green's functions is the `free_greens_function_tau` method.
"""
if S_aa is None: S_aa = np.eye(H_aa.shape[0])
w_q = self.get_matsubara_frequencies(beta)
g_qaa = np.linalg.inv(w_q[:, None, None] * S_aa[None, ...] - H_aa[None, ...])
return g_qaa
# -- Dyson equation solvers
def dyson_matsubara(self, H_aa, Sigma_qaa, beta, S_aa=None):
""" Solve the Dyson equation in Matsubara frequency.
The Dyson equation gives the Green's function as
.. math:: G_{ij}(i \\omega_n ) = \\left[ i\\omega_n - H_{ij} - \\Sigma(i\\omega_n) \\right]^{-1}
Parameters
----------
H_aa : (m,m), array_like
Single-particle Hamiltonian matrix in :math:`m \\times m` orbital space.
Sigma_qaa : (n,m,m), ndarray
Self-energy :math:`\\Sigma` in Matsubara frequency with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
S_aa : (m,m), array_like, optional
Overlap matrix for the generalized case of non-orthogonal basis functions. Default is `S_aa = None`.
Returns
-------
G_qaa : (n,m,m), ndarray
Green's function :math:`G` in Matsubara frequency with :math:`m \\times m` orbital indices.
Notes
-----
The Matsubara frequency Dyson solver is the fastest Dyson solver,
albeit not as accurate as the DLR solver `dyson_dlr`.
"""
if S_aa is None: S_aa = np.eye(H_aa.shape[0])
w_q = self.get_matsubara_frequencies(beta)
G_qaa = np.linalg.inv(w_q[:, None, None] * S_aa[None, ...] - H_aa[None, ...] - Sigma_qaa)
return G_qaa
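    # Hedged sketch (added; not original API): with a static (frequency
    # independent) self-energy, Dyson's equation simply shifts the level,
    # G = [i*w_n - (H + Sigma)]^{-1}.
    def _example_dyson_matsubara(self):
        beta = 1.0
        H_aa = np.array([[0.1]])
        Sigma_qaa = 0.2 * np.ones((len(self.dlrmf), 1, 1), dtype=complex)
        G_qaa = self.dyson_matsubara(H_aa, Sigma_qaa, beta)
        assert np.allclose(G_qaa, self.free_greens_function_matsubara(H_aa + 0.2, beta))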
def dyson_dlr(self, H_aa, Sigma_xaa, beta, S_aa=None,
iterative=False, lomem=False, verbose=False, tol=1e-12):
""" Solve the Dyson equation in DLR coefficient space.
The Dyson equation gives the imaginary time Green's function :math:`G_{ij}(\\tau)` as
.. math:: (-\\partial_\\tau - H_{ij} - \\Sigma_{ij} \\ast \\, ) G_{jk}(\\tau) = \\delta(\\tau)
Using the free Green's function :math:`g` this can be rewritten as the integral equation
.. math:: (1 - g \\ast \\Sigma \\ast \\, ) G = g
which here is solved directly in DLR coefficient space.
Parameters
----------
H_aa : (m,m), array_like
Single-particle Hamiltonian matrix in :math:`m \\times m` orbital space.
Sigma_xaa : (n,m,m), ndarray
Self-energy :math:`\\Sigma` in DLR coefficient space with :math:`m \\times m` orbital indices.
beta : float
Inverse temperature :math:`\\beta`
S_aa : (m,m), array_like, optional
Overlap matrix for the generalized case of non-orthogonal basis functions. Default is `S_aa = None`.
iterative : bool, optional
Default `False`
lomem : bool, optional
Default `False`
tol : float, optional
Tolerance for iterative GMRES solver.
Returns
-------
G_xaa : (n,m,m), ndarray
Green's function :math:`G` in DLR coefficient space with :math:`m \\times m` orbital indices.
Notes
-----
This DLR space Dyson solver is the most accurate algorithm for solving the Dyson equation.
By default it uses Lapack's direct solver on matrix problem in combined DLR and orbital space.
This is fast and accurate for small problems. For larger problems the :math:`\\mathcal{O}(N^2M^2)`
memory foot-print limits the performance.
Hence, for large problems the solver should be run with `iterative=True` and `lomem=True` and will
then use GMRES to solve the linear system using an implicit matrix formulation.
This formulation gives a memory foot print that is of the same order as storing a single Green's function.
However, the requested GMRES tolerance `tol` has to be carefully tuned.
"""
na = H_aa.shape[0]
n = Sigma_xaa.shape[0]
I_aa = | np.eye(na) | numpy.eye |
#!/usr/bin/env python3
# Using am_sensors/simulatedSensors
# [TODO]
# - Differentiate between std in static or moving behaviour
import math
from math import sin, cos, pi
import rospy
import tf
from std_msgs.msg import Header
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3, PoseWithCovariance, PoseWithCovarianceStamped, TwistWithCovariance
from sensor_msgs.msg import NavSatFix, Imu
from am_driver.msg import WheelEncoder
from am_driver.msg import SensorStatus, CurrentStatus
from nav_msgs.msg import Odometry
import threading
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib as mpl
import seaborn as sns
from scipy.spatial.transform import Rotation as Rot
import numpy as np
import pymap3d as pm
# import the random module
import random
class AEKF_Sim():
def __init__(self):
# Define name of the Node
rospy.init_node("AEKF5_Sim", anonymous=True)
# Define the run type of the Filter
self.test = True
self.print = False
self.ros = True
# Define the self.lock to allow multi-threading
self.lock = threading.Lock()
# Check if the filter is ready to start
self.filter = False
# Get the current time
now = rospy.get_time()
# Kalman states
self.x_t = 0.0
self.y_t = 0.0
self.yaw_t = 0.0
self.x_dot_t = 0.0
self.yaw_dot_t = 0.0
# Frequency of the Kalman filter
self.rate = 250
        # Number of steps over which the control input is gradually accounted for
self.steps = 250
# State-Vector
self.X_t = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
self.X_Pred = self.X_t
self.X_control = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
self.X_wheel_odom = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
self.X_visual_odom = np.array([self.x_t, self.y_t, self.yaw_t,
self.x_dot_t, self.yaw_dot_t])
# Filter Covariance Matrix
self.P_t = np.eye(5)*1e-5
self.P_Pred = self.P_t
# Filter Innovation Matrix
self.K = np.diag(np.zeros(5))
# Initialise Measurements Vector
self.Z = np.array([])
# Initialise Measurements Covariance Matrix
self.R = np.array([])
# Initialise Measurements Matrix
self.H = np.zeros((5,0))
# Initialise Measurements Jacobian Matrix
self.J_H = np.zeros((5,0))
print("Initialised AEKF_Sim")
# Define set of topics to subscribe to
rospy.Subscriber('Odom_Ground', Odometry, self.GroundTruth)
self.ground_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
rospy.Subscriber('cmd_vel', Twist, self.Control)
self.control_fusion = True
self.control_measure = False
self.control_t = -1
self.control_state = np.array([0.0, 0.0])
rospy.Subscriber('current_status', CurrentStatus, self.CurrentStatus)
self.current_status = 1
rospy.Subscriber('/wheel_odometry/odom', Vector3, self.WheelOdometer)
self.wheel_fusion = True
self.wheel_odometer_measure = False
self.wheel_odometer_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.wheel_odometer_bias = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.wheel_odometer_var = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
rospy.Subscriber('/visual_odometry/odom', Vector3, self.VisualOdometer)
self.visual_fusion = True
self.visual_odometer_measure = False
self.visual_odometer_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.visual_odometer_bias = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.visual_odometer_var = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.gps_th = 0
rospy.Subscriber('automower_gps/GPSfix', Vector3, self.GPS)
self.gps_fusion = True
self.gps_measure = False
self.gps_state = np.array([0.0, 0.0, 0.0])
self.gps_bias = np.array([0.0, 0.0, 0.0])
self.gps_var = | np.array([0.0, 0.0, 0.0]) | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""hydrological methods powered by pyFlwDir"""
import warnings
import logging
import numpy as np
import xarray as xr
import geopandas as gpd
import pyflwdir
from typing import Tuple, Union, Optional
from . import gis_utils
logger = logging.getLogger(__name__)
__all__ = [
"flwdir_from_da",
"d8_from_dem",
"reproject_hydrography_like",
"stream_map",
"basin_map",
"outlet_map",
"clip_basins",
"upscale_flwdir",
"dem_adjust",
]
### FLWDIR METHODS ###
def flwdir_from_da(
da: xr.DataArray,
ftype: str = "infer",
check_ftype: bool = True,
mask: Union[xr.DataArray, bool, None] = None,
logger=logger,
):
"""Parse dataarray to flow direction raster object. If a mask coordinate is present
    this will be passed on to the pyflwdir.from_array method.
Parameters
----------
da : xarray.DataArray
DataArray containing flow direction raster
ftype : {'d8', 'ldd', 'nextxy', 'nextidx', 'infer'}, optional
        name of flow direction type; inferred from the data if 'infer' (the default)
check_ftype : bool, optional
check if valid flow direction raster if ftype is not 'infer', by default True
mask : xr.DataArray, bool, optional
Mask for gridded flow direction data, by default None.
If True, use the mask coordinate of `da`.
Returns
-------
flwdir : pyflwdir.FlwdirRaster
Flow direction raster object
"""
if not isinstance(da, xr.DataArray):
raise TypeError("da should be an instance of xarray.DataArray")
crs = da.raster.crs
if crs is None:
raise ValueError("da is missing CRS property, set using `da.raster.set_crs`")
latlon = crs.is_geographic
_crs = "geographic" if latlon else "projected"
_unit = "degree" if latlon else "meter"
logger.debug(f"Initializing flwdir with {_crs} CRS with unit {_unit}.")
if isinstance(mask, xr.DataArray):
mask = mask.values
elif isinstance(mask, bool) and mask and "mask" in da.coords:
# backwards compatibility for mask = True
mask = da["mask"].values
elif not isinstance(mask, np.ndarray):
mask = None
flwdir = pyflwdir.from_array(
data=da.squeeze().values,
ftype=ftype,
check_ftype=check_ftype,
mask=mask,
transform=da.raster.transform,
latlon=latlon,
)
return flwdir
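# Hedged usage sketch (not part of the module; assumes hydromt's raster
# accessor is registered, which importing hydromt does): parse a minimal,
# valid 2x2 D8 grid that drains to a pit in the lower-right cell.
def _example_flwdir_from_da():
    data = np.array([[2, 4], [1, 0]], dtype=np.uint8)  # D8 codes, 0 = pit
    da = xr.DataArray(
        data, dims=("y", "x"), coords={"y": [1.5, 0.5], "x": [0.5, 1.5]}
    )
    da.raster.set_crs(4326)
    return flwdir_from_da(da, ftype="d8")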
def d8_from_dem(
da_elv: xr.DataArray,
gdf_stream: Optional[gpd.GeoDataFrame] = None,
max_depth: float = -1.0,
outlets: str = "edge",
idxs_pit: Optional[np.ndarray] = None,
) -> xr.DataArray:
"""Derive D8 flow directions grid from an elevation grid.
Outlets occur at the edge of valid data or at user defined cells (if `idxs_pit` is provided).
    A local depression is filled based on its lowest pour point level if the pour point
depth is smaller than the maximum pour point depth `max_depth`, otherwise the lowest
elevation in the depression becomes a pit.
Parameters
----------
da_elv: 2D xarray.DataArray
elevation raster
gdf_stream: geopandas.GeoDataArray, optional
stream vector layer with 'uparea' [km2] column which is used to burn
the river in the elevation data.
max_depth: float, optional
Maximum pour point depth. Depressions with a larger pour point
depth are set as pit. A negative value (default) equals an infinitely
large pour point depth causing all depressions to be filled.
outlets: {'edge', 'min'}
        Position for basin outlet(s): at all valid elevation edge cells ('edge')
        or only at the minimum elevation edge cell ('min').
idxs_pit: 1D array of int
Linear indices of outlet cells.
Returns
-------
da_flw: xarray.DataArray
D8 flow direction grid
See Also
--------
pyflwdir.dem.fill_depressions
"""
nodata = da_elv.raster.nodata
crs = da_elv.raster.crs
assert da_elv.raster.res[1] < 0
assert nodata is not None and ~np.isnan(nodata)
    # burn in rivers if a stream vector layer with upstream area is provided
if gdf_stream is not None and "uparea" in gdf_stream.columns:
gdf_stream = gdf_stream.sort_values(by="uparea")
dst_rivupa = da_elv.raster.rasterize(gdf_stream, col_name="uparea", nodata=0)
# make sure the rivers have a slope and are below all other elevation cells.
# river elevation = min(elv) - log10(uparea[m2]) from rasterized river uparea.
elvmin = da_elv.where(da_elv != nodata).min()
elvriv = elvmin - np.log10(np.maximum(1.0, dst_rivupa * 1e3))
# synthetic elevation with river burned in
da_elv = elvriv.where(np.logical_and(da_elv != nodata, dst_rivupa > 0), da_elv)
da_elv.raster.set_nodata(nodata)
da_elv.raster.set_crs(crs)
# derive new flow directions from (synthetic) elevation
d8 = pyflwdir.dem.fill_depressions(
da_elv.values.astype(np.float32),
max_depth=max_depth,
nodata=da_elv.raster.nodata,
outlets=outlets,
idxs_pit=idxs_pit,
)[1]
# return xarray data array
da_flw = xr.DataArray(
dims=da_elv.raster.dims,
coords=da_elv.raster.coords,
data=d8,
name="flwdir",
)
da_flw.raster.set_nodata(247)
da_flw.raster.set_crs(crs)
return da_flw
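# Hedged usage sketch (not part of the module; assumes hydromt's raster
# accessor): derive D8 directions from a tiny synthetic DEM sloping towards
# the south-east corner; with outlets='edge' the pit lands on the lowest cell.
def _example_d8_from_dem():
    elv = np.array([[3.0, 2.0], [2.0, 1.0]], dtype=np.float32)
    da_elv = xr.DataArray(
        elv, dims=("y", "x"), coords={"y": [1.5, 0.5], "x": [0.5, 1.5]}
    )
    da_elv.raster.set_nodata(-9999.0)
    da_elv.raster.set_crs(4326)
    return d8_from_dem(da_elv)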
def upscale_flwdir(
ds: xr.Dataset,
flwdir: pyflwdir.FlwdirRaster,
scale_ratio: int,
method: str = "com2",
uparea_name: Optional[str] = None,
flwdir_name: str = "flwdir",
logger=logger,
**kwargs,
) -> Tuple[xr.DataArray, pyflwdir.FlwdirRaster]:
"""Upscale flow direction network to lower resolution.
Parameters
----------
ds : xarray.Dataset
Dataset flow direction.
flwdir : pyflwdir.FlwdirRaster
Flow direction raster object.
scale_ratio: int
Size of upscaled (coarse) grid cells.
uparea_name : str, optional
Name of upstream area DataArray, by default None and derived on the fly.
flwdir_name : str, optional
Name of upscaled flow direction raster DataArray, by default "flwdir"
method : {'com2', 'com', 'eam', 'dmm'}
Upscaling method for flow direction data, by default 'com2'.
Returns
-------
da_flwdir = xarray.DataArray
Upscaled D8 flow direction grid.
flwdir_out : pyflwdir.FlwdirRaster
Upscaled pyflwdir flow direction raster object.
See Also
--------
pyflwdir.FlwdirRaster.upscale
"""
if not np.all(flwdir.shape == ds.raster.shape):
raise ValueError("Flwdir and ds dimensions do not match.")
uparea = None
if uparea_name is not None:
if uparea_name in ds.data_vars:
uparea = ds[uparea_name].values
else:
logger.warning(f'Upstream area map "{uparea_name}" not in dataset.')
flwdir_out, idxs_out = flwdir.upscale(
scale_ratio, method=method, uparea=uparea, **kwargs
)
# setup output DataArray
ftype = flwdir.ftype
dims = ds.raster.dims
xs, ys = gis_utils.affine_to_coords(flwdir_out.transform, flwdir_out.shape)
coords = {ds.raster.y_dim: ys, ds.raster.x_dim: xs}
da_flwdir = xr.DataArray(
name=flwdir_name,
data=flwdir_out.to_array(ftype),
coords=coords,
dims=dims,
attrs=dict(long_name=f"{ftype} flow direction", _FillValue=flwdir._core._mv),
)
# translate outlet indices to global x,y coordinates
x_out, y_out = ds.raster.idx_to_xy(idxs_out, mask=idxs_out != flwdir._mv)
da_flwdir.coords["x_out"] = xr.Variable(
dims=dims,
data=x_out,
attrs=dict(long_name="subgrid outlet x coordinate", _FillValue=np.nan),
)
da_flwdir.coords["y_out"] = xr.Variable(
dims=dims,
data=y_out,
attrs=dict(long_name="subgrid outlet y coordinate", _FillValue=np.nan),
)
# outlet indices
da_flwdir.coords["idx_out"] = xr.DataArray(
data=idxs_out,
dims=dims,
attrs=dict(long_name="subgrid outlet index", _FillValue=flwdir._mv),
)
return da_flwdir, flwdir_out
def reproject_hydrography_like(
ds_hydro: xr.Dataset,
da_elv: xr.DataArray,
river_upa: float = 5.0,
river_len: float = 1e3,
uparea_name: str = "uparea",
flwdir_name: str = "flwdir",
logger=logger,
**kwargs,
) -> xr.Dataset:
"""Reproject flow direction and upstream area data to the `da_elv` crs and grid.
Flow directions are derived from a reprojected grid of synthetic elevation,
based on the log10 upstream area [m2]. For regions without upstream area, the original
elevation is used assuming these elevation values are <= 0 (i.e. offshore bathymetry).
The upstream area on the reprojected grid is based on the new flow directions and
rivers entering the domain, defined by the minimum upstream area `river_upa` [km2]
and a distance from river outlets `river_len` [m]. The latter is to avoid setting
boundary conditions at the downstream end / outflow of a river.
    NOTE: the resolution of `ds_hydro` should be similar to or smaller than the resolution
of `da_elv` for good results.
NOTE: this method is still experimental and might change in the future!
Parameters
----------
ds_hydro: xarray.Dataset
Dataset with gridded flow directions named `flwdir_name` and upstream area
named `uparea_name` [km2].
da_elv: xarray.DataArray
DataArray with elevation on destination grid.
river_upa: float, optional
Minimum upstream area threshold [km2] for inflowing rivers, by default 5 km2
river_len: float, optional
        Minimum distance from river outlet for inflowing river location, by default 1000 m.
uparea_name, flwdir_name : str, optional
Name of upstream area (default "uparea") and flow direction ("flwdir") variables
in `ds_hydro`.
kwargs: key-word arguments
key-word arguments are passed to `d8_from_dem`
Returns
-------
xarray.Dataset
Reprojected gridded dataset with flow direction and upstream area variables.
See Also
--------
d8_from_dem
"""
# check N->S orientation
assert da_elv.raster.res[1] < 0
assert ds_hydro.raster.res[1] < 0
for name in [uparea_name, flwdir_name]:
if name not in ds_hydro:
raise ValueError(f"{name} variable not found in ds_hydro")
crs = da_elv.raster.crs
da_upa = ds_hydro[uparea_name]
nodata = da_upa.raster.nodata
upa_mask = da_upa != nodata
rivmask = da_upa > river_upa
# synthetic elevation -> max(log10(uparea[m2])) - log10(uparea[m2])
elvsyn = np.log10(np.maximum(1.0, da_upa * 1e3))
elvsyn = da_upa.where(~upa_mask, elvsyn.max() - elvsyn)
# take minimum with rank to ensure pits of main rivers have zero syn. elevation
if | np.any(rivmask) | numpy.any |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
from ..utils import cat
from .utils import permute_and_flatten
import scipy.ndimage
import numpy as np
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
if len(mask.shape) == 2:
mask = mask[:, :, np.newaxis]
boxes = np.zeros([mask.shape[-1], 4], dtype=np.float32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
w = x2 - x1
h = y2 - y1
ctr_x = (x1 + 0.5 * w) * 4.0
ctr_y = (y1 + 0.5 * h) * 4.0
w = w * 4.0 / 0.3
h = h * 4.0 / 0.3
x1 = ctr_x - w * 0.5
y1 = ctr_y - h * 0.5
x2 = ctr_x + w * 0.5
y2 = ctr_y + h * 0.5
        else:
            # No mask for this instance; might happen due to resizing or
            # cropping. Upstream code sets the bbox to zeros, but this variant
            # treats an empty mask as a hard error instead.
            assert False, (horizontal_indicies, vertical_indicies)
            x1, x2, y1, y2 = 0, 0, 0, 0  # unreachable fallback kept from upstream
boxes[i] = np.array([x1, y1, x2, y2])
return boxes.astype(np.float32)
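# Hedged illustration (not part of the original file): a single 2x2 blob in an
# 8x8 mask yields one box, rescaled by the hard-coded feature stride (4.0) and
# shrink factor (0.3) used above.
def _example_extract_bboxes():
    mask = np.zeros((8, 8), dtype=np.uint8)
    mask[2:4, 3:5] = 1
    boxes = extract_bboxes(mask)
    assert boxes.shape == (1, 4)
    return boxes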
class RPNPostProcessor(torch.nn.Module):
"""
Performs post-processing on the outputs of the RPN boxes, before feeding the
proposals to the heads
"""
def __init__(
self,
pre_nms_top_n,
post_nms_top_n,
nms_thresh,
min_size,
box_coder=None,
fpn_post_nms_top_n=None,
fpn_post_nms_per_batch=True,
pred_targets=False,
pred_targets_as_true=False,
):
"""
Arguments:
pre_nms_top_n (int)
post_nms_top_n (int)
nms_thresh (float)
min_size (int)
box_coder (BoxCoder)
fpn_post_nms_top_n (int)
"""
super(RPNPostProcessor, self).__init__()
self.pre_nms_top_n = pre_nms_top_n
self.post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.min_size = min_size
if box_coder is None:
box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
self.box_coder = box_coder
if fpn_post_nms_top_n is None:
fpn_post_nms_top_n = post_nms_top_n
self.fpn_post_nms_top_n = fpn_post_nms_top_n
self.fpn_post_nms_per_batch = fpn_post_nms_per_batch
self.pred_targets = pred_targets
self.pred_targets_as_true = pred_targets_as_true
def add_gt_proposals(self, proposals, targets):
"""
Arguments:
proposals: list[BoxList]
targets: list[BoxList]
"""
# Get the device we're operating on
device = proposals[0].bbox.device
gt_boxes = [target.copy_with_fields([]) for target in targets]
# later cat of bbox requires all fields to be present for all bbox
# so we need to add a dummy for objectness that's missing
for gt_box in gt_boxes:
gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))
proposals = [
cat_boxlist((proposal, gt_box))
for proposal, gt_box in zip(proposals, gt_boxes)
]
return proposals
def add_pred_proposals(self, proposals, pred_targets):
"""
Arguments:
proposals: list[BoxList]
targets: list[BoxList]
"""
# Get the device we're operating on
device = proposals[0].bbox.device
for idx, proposal in enumerate(proposals):
if pred_targets and pred_targets[idx]:
gt_box = pred_targets[idx].copy_with_fields([])
gt_box.add_field("objectness", torch.ones(len(gt_box), device=device) * 2)
proposals[idx] = cat_boxlist([proposal, gt_box])
return proposals
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
"""
Arguments:
anchors: list[BoxList]
objectness: tensor of size N, A, H, W
box_regression: tensor of size N, A * 4, H, W
"""
device = objectness.device
N, A, H, W = objectness.shape
# put in the same format as anchors
objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
objectness = objectness.sigmoid()
box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
num_anchors = A * H * W
pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)
batch_idx = torch.arange(N, device=device)[:, None]
box_regression = box_regression[batch_idx, topk_idx]
image_shapes = [box.size for box in anchors]
concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]
proposals = self.box_coder.decode(
box_regression.view(-1, 4), concat_anchors.view(-1, 4)
)
proposals = proposals.view(N, -1, 4)
result = []
for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
boxlist = BoxList(proposal, im_shape, mode="xyxy")
boxlist.add_field("objectness", score)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = remove_small_boxes(boxlist, self.min_size)
boxlist = boxlist_nms(
boxlist,
self.nms_thresh,
max_proposals=self.post_nms_top_n,
score_field="objectness",
)
result.append(boxlist)
return result
def forward(self, anchors, objectness, box_regression, targets=None,
centerness=None, rpn_center_box_regression=None, centerness_pack=None):
"""
Arguments:
anchors: list[list[BoxList]]
objectness: list[tensor]
box_regression: list[tensor]
Returns:
boxlists (list[BoxList]): the post-processed anchors, after
applying box decoding and NMS
"""
sampled_boxes = []
num_levels = len(objectness)
anchors = list(zip(*anchors))
for a, o, b in zip(anchors, objectness, box_regression):
sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))
boxlists = list(zip(*sampled_boxes))
boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]
if num_levels > 1:
boxlists = self.select_over_all_levels(boxlists)
# append ground-truth bboxes to proposals
if self.training and targets is not None:
boxlists = self.add_gt_proposals(boxlists, targets)
if self.pred_targets:
pred_targets = []
if True:
for img_centerness, center_box_reg in zip(centerness, rpn_center_box_regression):
# gt_centerness, gt_bbox, anchor_bbox = center_target
# print(rpn_center_box_regression, anchor_bbox)
# gt_mask = gt_centerness.detach().cpu().numpy() > 0.0
img_centerness = img_centerness[0, :, :]
center_box_reg = center_box_reg[:, :, :].permute(1, 2, 0)
anchor_bbox = np.zeros(shape=(center_box_reg.shape[0], center_box_reg.shape[1], 4))
for xx in range(anchor_bbox.shape[1]):
for yy in range(anchor_bbox.shape[0]):
anchor_bbox[yy, xx, :] = [max(0.0, xx * 4 - 16), max(0.0, yy * 4 - 16),
min(xx * 4 + 16, boxlists[0].size[0]),
min(yy * 4 + 16, boxlists[0].size[1])]
anchor_bbox = torch.as_tensor(anchor_bbox, device=center_box_reg.device)
# print(center_box_reg.shape, anchor_bbox.shape)
boxes = self.box_coder.decode(center_box_reg.reshape(-1, 4), anchor_bbox.view(-1, 4))
pred_target = None
pred_score = torch.sigmoid(img_centerness.detach()).cpu().numpy()
pred_mask = pred_score > 0.95
# print(gt_mask.shape, pred_mask.shape)
imllabel, numlabel = scipy.ndimage.label(pred_mask)
if numlabel > 0:
                        valid = np.zeros(shape=(numlabel,), dtype=bool)  # np.bool was removed in NumPy 1.24
box_inds = []
for ano in range(1, numlabel + 1):
mask = imllabel == ano
valid[ano - 1] = True # gt_mask[mask].sum() == 0
box_inds.append(np.argmax(pred_score * mask))
if | np.any(valid) | numpy.any |
import sys
import operator
import pytest
import ctypes
import gc
import warnings
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# But it is currently an equivalent cast:
assert np.can_cast(x, y, casting="equiv")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
def test_bool_commastring(self):
        d = np.dtype('?,?,?')  # '?' is bool; this should parse, not raise
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
assert_raises(ValueError, make_dtype, -10)
# no errors here:
dt = make_dtype(np.uint32(0))
np.zeros(1, dtype=dt)[0].item()
def test_fields_by_index(self):
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
assert_dtype_equal(dt[0], np.dtype(np.int8))
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
assert_dtype_equal(dt[-1], dt[1])
assert_dtype_equal(dt[-2], dt[0])
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
assert_equal(dt[1], dt[np.int8(1)])
@pytest.mark.parametrize('align_flag',[False, True])
def test_multifield_index(self, align_flag):
# indexing with a list produces subfields
# the align flag should be preserved
dt = np.dtype([
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
], align=align_flag)
dt_sub = dt[['B', 'col1']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B', 'col1'],
'formats': ['<f8', '<U20'],
'offsets': [88, 0],
'titles': [None, 'title'],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[['B']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B'],
'formats': ['<f8'],
'offsets': [88],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[[]]
assert_equal(
dt_sub,
np.dtype({
'names': [],
'formats': [],
'offsets': [],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
assert_raises(TypeError, operator.getitem, dt, ())
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
assert_raises(KeyError, operator.getitem, dt, ['fake'])
assert_raises(KeyError, operator.getitem, dt, ['title'])
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
def test_partial_dict(self):
# 'names' is missing
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
d = np.dtype((np.dtype([]), 10))
assert_equal(d.shape, (10,))
assert_equal(d.itemsize, 0)
assert_equal(d.base, np.dtype([]))
arr = np.fromiter((() for i in range(10)), [])
assert_equal(arr.dtype, np.dtype([]))
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
assert_equal(np.frombuffer(b'', dtype=[], count=2),
np.empty(2, dtype=[]))
assert_raises(ValueError, np.dtype, ([], 'f8'))
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
np.ones(2, dtype=bool))
assert_equal(np.zeros((1, 2), dtype=[]) == a,
np.ones((1, 2), dtype=bool))
class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
# FutureWarning during deprecation period; after it is passed this
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
with pytest.warns(FutureWarning):
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
def test_shape_simple(self):
"""Test some simple cases that shouldn't be equal"""
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
def test_shape_monster(self):
"""Test some more complicated cases that shouldn't be equal"""
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
def test_shape_sequence(self):
# Any sequence of integers should work as shape, but the result
# should be a tuple (immutable) of base type integers.
a = np.array([1, 2, 3], dtype=np.int16)
l = [1, 2, 3]
# Array gets converted
dt = np.dtype([('a', 'f4', a)])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
#
class IntLike:
def __index__(self):
return 3
def __int__(self):
# (a PyNumber_Check fails without __int__)
return 3
dt = np.dtype([('a', 'f4', IntLike())])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
dt = np.dtype([('a', 'f4', (IntLike(),))])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
def test_shape_matches_ndim(self):
dt = np.dtype([('a', 'f4', ())])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4')])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4', 4)])
assert_equal(dt['a'].shape, (4,))
assert_equal(dt['a'].ndim, 1)
dt = np.dtype([('a', 'f4', (1, 2, 3))])
assert_equal(dt['a'].shape, (1, 2, 3))
assert_equal(dt['a'].ndim, 3)
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
max_intp = np.iinfo(np.intp).max
# Too large values (the datatype is part of this)
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
        # Takes a different code path (fails earlier):
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
# Negative values
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_alignment(self):
        # Check that subarrays are aligned
t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
def iter_struct_object_dtypes():
"""
    Iterates over a few complex dtypes and object patterns which
    fill the array with a given object (defaults to a singleton).
Yields
------
dtype : dtype
pattern : tuple
Structured tuple for use with `np.array`.
count : int
Number of objects stored in the dtype.
singleton : object
A singleton object. The returned pattern is constructed so that
all objects inside the datatype are set to the singleton.
"""
obj = object()
dt = np.dtype([('b', 'O', (2, 3))])
p = ([[obj] * 3] * 2,)
yield pytest.param(dt, p, 6, obj, id="<subarray>")
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
p = (0, [[obj] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
p = (0, [[(obj, 0)] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
p = (0, [[(obj, obj)] * 3] * 2)
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
"""These tests cover various uses of complicated structured types which
include objects and thus require reference counting.
"""
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
pytest.param(np.empty, None,
# None is probably used for too many things
marks=pytest.mark.skip("unreliable due to python's behaviour")),
(np.ones, 1),
(np.zeros, 0)])
def test_structured_object_create_delete(self, dt, pat, count, singleton,
creation_func, creation_obj):
"""Structured object reference counting in creation and deletion"""
# The test assumes that 0, 1, and None are singletons.
gc.collect()
before = sys.getrefcount(creation_obj)
arr = creation_func(3, dt)
now = sys.getrefcount(creation_obj)
assert now - before == count * 3
del arr
now = sys.getrefcount(creation_obj)
assert now == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_item_setting(self, dt, pat, count, singleton):
"""Structured object reference counting for simple item setting"""
one = 1
gc.collect()
before = sys.getrefcount(singleton)
arr = np.array([pat] * 3, dt)
assert sys.getrefcount(singleton) - before == count * 3
# Fill with `1` and check that it was replaced correctly:
before2 = sys.getrefcount(one)
arr[...] = one
after2 = sys.getrefcount(one)
assert after2 - before2 == count * 3
del arr
gc.collect()
assert sys.getrefcount(one) == before2
assert sys.getrefcount(singleton) == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(
['shape', 'index', 'items_changed'],
[((3,), ([0, 2],), 2),
((3, 2), ([0, 2], slice(None)), 4),
((3, 2), ([0, 2], [1]), 2),
((3,), ([True, False, True]), 2)])
def test_structured_object_indexing(self, shape, index, items_changed,
dt, pat, count, singleton):
"""Structured object reference counting for advanced indexing."""
zero = 0
one = 1
arr = np.zeros(shape, dt)
gc.collect()
before_zero = sys.getrefcount(zero)
before_one = sys.getrefcount(one)
# Test item getting:
part = arr[index]
after_zero = sys.getrefcount(zero)
assert after_zero - before_zero == count * items_changed
del part
# Test item setting:
arr[index] = one
gc.collect()
after_zero = sys.getrefcount(zero)
after_one = sys.getrefcount(one)
assert before_zero - after_zero == count * items_changed
assert after_one - before_one == count * items_changed
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
"""Structured object reference counting for specialized functions.
        The older functions such as take and repeat use different code paths
        than item setting (at the time of writing).
"""
indices = [0, 1]
arr = np.array([pat] * 3, dt)
gc.collect()
before = sys.getrefcount(singleton)
res = arr.take(indices)
after = sys.getrefcount(singleton)
assert after - before == count * 2
new = res.repeat(10)
gc.collect()
after_repeat = sys.getrefcount(singleton)
assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
"""
dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
'offsets':[0, 4]}, (2, 3))])
sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
'offsets':[4]}, (2, 3))])
def test_sparse_field_assignment(self):
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[...] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
def test_sparse_field_assignment_fancy(self):
# Fancy assignment goes to the copyswap function for complex types:
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
b = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
def test_list_recursion(self):
l = list()
l.append(('f', l))
with pytest.raises(RecursionError):
np.dtype(l)
def test_tuple_recursion(self):
d = np.int32
for i in range(100000):
d = (d, (1,))
with pytest.raises(RecursionError):
np.dtype(d)
def test_dict_recursion(self):
d = dict(names=['self'], formats=[None], offsets=[0])
d['formats'][0] = d
with pytest.raises(RecursionError):
np.dtype(d)
class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
assert_raises(TypeError, np.dtype, int, metadata='datum')
assert_raises(TypeError, np.dtype, int, metadata=1)
assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
assert_(d['a'].metadata == {'datum': 1})
def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
# If the sticky aligned flag is set to True, it makes the
# str() function use a dict representation with an 'aligned' flag
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))],
(3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])],
align=True)
assert_equal(str(dt),
"{'names':['top','bottom'], "
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)),"
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]], "
"'offsets':[0,76800], "
"'itemsize':80000, "
"'aligned':True}")
assert_equal(np.dtype(eval(str(dt))), dt)
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"[(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')]")
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names':['rgba','r','g','b'],"
" 'formats':['<u4','u1','u1','u1'],"
" 'offsets':[0,0,1,2],"
" 'titles':['Color','Red pixel',"
"'Green pixel','Blue pixel'],"
" 'itemsize':4}")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names':['r','b'],"
" 'formats':['u1','u1'],"
" 'offsets':[0,2],"
" 'titles':['Red pixel','Blue pixel'],"
" 'itemsize':3}")
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(repr(dt),
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])])")
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
align=True)
assert_equal(repr(dt),
"dtype([(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']}, align=True)
assert_equal(repr(dt),
"dtype({'names':['rgba','r','g','b'],"
" 'formats':['<u4','u1','u1','u1'],"
" 'offsets':[0,0,1,2],"
" 'titles':['Color','Red pixel',"
"'Green pixel','Blue pixel'],"
" 'itemsize':4}, align=True)")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel'],
'itemsize': 4})
assert_equal(repr(dt),
"dtype({'names':['r','b'], "
"'formats':['u1','u1'], "
"'offsets':[0,2], "
"'titles':['Red pixel','Blue pixel'], "
"'itemsize':4})")
def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
def test_repr_str_subarray(self):
dt = np.dtype(('<i2', (1,)))
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
assert_equal(str(dt), "('<i2', (1,))")
def test_base_dtype_with_object_type(self):
# Issue gh-2798, should not error.
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
def test_empty_string_to_object(self):
# Pull request #4722
np.array(["", ""]).astype(object)
def test_void_subclass_unsized(self):
dt = np.dtype(np.record)
assert_equal(repr(dt), "dtype('V')")
assert_equal(str(dt), '|V0')
assert_equal(dt.name, 'record')
def test_void_subclass_sized(self):
dt = np.dtype((np.record, 2))
assert_equal(repr(dt), "dtype('V2')")
assert_equal(str(dt), '|V2')
assert_equal(dt.name, 'record16')
def test_void_subclass_fields(self):
dt = np.dtype((np.record, [('a', '<u2')]))
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
assert_equal(dt.name, 'record16')
class TestDtypeAttributeDeletion:
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["subdtype", "descr", "str", "name", "base", "shape",
"isbuiltin", "isnative", "isalignedstruct", "fields",
"metadata", "hasobject"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
def test_dtype_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["names"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
class TestDtypeAttributes:
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
pass
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
class TestPickling:
def check_pickling(self, dtype):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
buf = pickle.dumps(dtype, proto)
            # The dtype pickling itself pickles `np.dtype` if it is pickled
            # as a singleton. `dtype` should be stored in the buffer:
assert b"_DType_reconstruct" not in buf
assert b"dtype" in buf
pickled = pickle.loads(buf)
assert_equal(pickled, dtype)
assert_equal(pickled.descr, dtype.descr)
if dtype.metadata is not None:
assert_equal(pickled.metadata, dtype.metadata)
# Check the reconstructed dtype is functional
x = | np.zeros(3, dtype=dtype) | numpy.zeros |
"""
Utility used by the Network class to actually train.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
"""
import logging
from compile_keras import *
import keras.utils as kutils
# Callbacks used below; imported explicitly in case `compile_keras` does not
# re-export them.
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, accuracy_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
import os
def mae_mape(actual_y, prediction_y):
mape = safe_mape(actual_y, prediction_y)
mae = mean_absolute_error(actual_y, prediction_y)
return mape * mae
def safe_log(input_array):
return_vals = input_array.copy()
neg_mask = return_vals < 0
return_vals = np.log(np.absolute(return_vals) + 1)
return_vals[neg_mask] *= -1.
return return_vals
def safe_exp(input_array):
return_vals = input_array.copy()
neg_mask = return_vals < 0
return_vals = np.exp(np.clip(np.absolute(return_vals), -7, 7)) - 1
return_vals[neg_mask] *= -1.
return return_vals
def safe_mape(actual_y, prediction_y):
"""
Calculate mean absolute percentage error
Args:
actual_y - numpy array containing targets with shape (n_samples, n_targets)
prediction_y - numpy array containing predictions with shape (n_samples, n_targets)
"""
# Ensure data shape is correct
actual_y = actual_y.reshape(actual_y.shape[0], )
prediction_y = prediction_y.reshape(prediction_y.shape[0], )
# Calculate MAPE
diff = np.absolute((actual_y - prediction_y) / np.clip(np.absolute(actual_y), 1., None))
return 100. * np.mean(diff)
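# Hedged sketch (not part of the original pipeline): quick sanity checks for
# the transform helpers above. safe_log/safe_exp form a signed log transform
# that should round-trip exactly, and safe_mape floors the denominator at 1
# to avoid exploding percentages for near-zero actuals.
def _check_transform_helpers():
    vals = np.array([-5.0, 0.0, 3.0])
    assert np.allclose(safe_exp(safe_log(vals)), vals)
    # |100 - 90| / 100 * 100 == 10.0
    assert np.isclose(safe_mape(np.array([100.0]), np.array([90.0])), 10.0)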
def train_and_score(network):
"""Train the model, return test loss.
Args:
network (dict): the parameters of the network
"""
df_all_train_x = pd.read_pickle('data/df_all_train_x.pkl.gz', compression='gzip')
df_all_train_y = pd.read_pickle('data/df_all_train_y.pkl.gz', compression='gzip')
df_all_train_actuals = pd.read_pickle('data/df_all_train_actuals.pkl.gz', compression='gzip')
df_all_test_x = pd.read_pickle('data/df_all_test_x.pkl.gz', compression='gzip')
df_all_test_y = pd.read_pickle('data/df_all_test_y.pkl.gz', compression='gzip')
df_all_test_actuals = pd.read_pickle('data/df_all_test_actuals.pkl.gz', compression='gzip')
train_y = df_all_train_y[0].values
train_actuals = df_all_train_actuals[0].values
train_x = df_all_train_x.values
test_actuals = df_all_test_actuals.values
test_y = df_all_test_y[0].values
test_x = df_all_test_x.values
print('\rNetwork')
for property in network:
print(property, ':', network[property])
logging.info('%s: %s' % (property, network[property]))
if 'result' in network:
result = network['result']
else:
result = 'mae'
# Set use of log of y or y
if network['log_y']:
train_eval_y = train_y
else:
train_eval_y = train_actuals
if 'epochs' in network:
epochs = network['epochs']
else:
epochs = 500
# network['int_layer'] = True
results = {
'mae': [],
'mape': [],
'maeape': [],
'epochs': [],
}
for i in range(1):
# Clear all values
s = None
x_cv_train = None
y_cv_train = None
model = None
history = None
hist_epochs = None
# Delete weights file, if exists
try:
os.remove('weights.hdf5')
        except OSError:
pass
# Reorder array - get array index
s = np.arange(train_x.shape[0])
# Reshuffle index
np.random.shuffle(s)
# Create array using new index
x_cv_train = train_x[s]
y_cv_train = train_eval_y[s]
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, verbose=1, patience=3)
early_stopping = EarlyStopping(monitor='val_loss', patience=10)
csv_logger = CSVLogger('./logs/training.log')
checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=0, save_best_only=True)
dimensions = train_x.shape[1]
model = compile_keras_model(network, dimensions)
history = model.fit(x_cv_train, y_cv_train,
batch_size=network['batch_size'],
epochs=epochs, # using early stopping, so no real limit
verbose=0,
validation_split=0.2,
callbacks=[early_stopping, reduce_lr, csv_logger, checkpointer])
model.load_weights('weights.hdf5')
predictions = model.predict(test_x)
mae = mean_absolute_error(test_actuals, predictions)
mape = safe_mape(test_actuals, predictions)
maeape = mae_mape(test_actuals, predictions)
hist_epochs = len(history.history['val_loss'])
results['mae'].append(mae)
results['mape'].append(mape)
results['maeape'].append(maeape)
results['epochs'].append(hist_epochs)
print('\rFold results')
print('epochs:', hist_epochs)
print('mae_mape:', maeape)
print('mape:', mape)
print('mae:', mae)
print('-' * 20)
overall_scores = {
'mae': np.mean(results['mae']),
'mape': np.mean(results['mape']),
'maeape': np.mean(results['maeape']),
'epochs': np.mean(results['epochs']),
}
print('\rResults')
print('epochs:', overall_scores['epochs'])
print('mae_mape:', overall_scores['maeape'])
print('mape:', overall_scores['mape'])
print('mae:', overall_scores['mae'])
print('-' * 20)
print('result:', overall_scores[result])
print('-' * 20)
logging.info('epochs: %.1f' % overall_scores['epochs'])
logging.info('mae_mape: %.4f' % overall_scores['maeape'])
logging.info('mape: %.4f' % overall_scores['mape'])
logging.info('mae: %.4f' % overall_scores['mae'])
logging.info('-' * 20)
logging.info('result: %.4f' % overall_scores[result])
logging.info('-' * 20)
return overall_scores[result]
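# Example `network` dict (illustrative only; the keys are inferred from the
# usage above, and compile_keras_model may require additional entries):
#   network = {'batch_size': 256, 'log_y': True, 'epochs': 500,
#              'result': 'mape'}
#   score = train_and_score(network)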
def train_and_score_bagging(network):
"""Train the model, return test loss.
Args:
network (dict): the parameters of the network
"""
train_predictions = pd.read_pickle('data/train_predictions.pkl.gz', compression='gzip')
test_predictions = pd.read_pickle('data/test_predictions.pkl.gz', compression='gzip')
train_actuals = pd.read_pickle('data/train_actuals.pkl.gz', compression='gzip')
test_actuals = pd.read_pickle('data/test_actuals.pkl.gz', compression='gzip')
train_x = train_predictions.values
train_y = train_actuals[0].values
train_log_y = safe_log(train_y)
test_x = test_predictions.values
test_y = test_actuals[0].values
test_log_y = safe_log(test_y)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, verbose=1, patience=3)
early_stopping = EarlyStopping(monitor='val_loss', patience=15)
csv_logger = CSVLogger('./logs/training.log')
checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=0, save_best_only=True)
dimensions = train_x.shape[1]
model = compile_keras_model(network, dimensions)
print('\rNetwork')
for property in network:
print(property, ':', network[property])
logging.info('%s: %s' % (property, network[property]))
# history = model.fit(train_x, train_y,
history = model.fit(train_x, train_log_y,
batch_size=network['batch_size'],
epochs=10000, # using early stopping, so no real limit
verbose=0,
validation_split=0.2,
callbacks=[early_stopping, csv_logger, reduce_lr, checkpointer])
print('\rResults')
hist_epochs = len(history.history['val_loss'])
# score = history.history['val_loss'][hist_epochs - 1]
model.load_weights('weights.hdf5')
predictions = model.predict(test_x)
prediction_results = predictions.reshape(predictions.shape[0],)
prediction_results = safe_exp(prediction_results)
score = safe_mape(test_y, prediction_results)
if np.isnan(score):
score = 9999
print('epochs:', hist_epochs)
print('loss:', score)
print('-' * 20)
logging.info('epochs: %d' % hist_epochs)
logging.info('loss: %.4f' % score)
logging.info('-' * 20)
return score
def train_and_score_shallow_bagging(network):
"""Train the model, return test loss.
Args:
network (dict): the parameters of the network
"""
train_predictions = pd.read_pickle('data/train_predictions.pkl.gz', compression='gzip')
test_predictions = pd.read_pickle('data/test_predictions.pkl.gz', compression='gzip')
train_actuals = pd.read_pickle('data/train_actuals.pkl.gz', compression='gzip')
test_actuals = pd.read_pickle('data/test_actuals.pkl.gz', compression='gzip')
target_columns = ['xgboost_keras_log', 'xgboost_keras_log_log', 'xgboost_log', 'keras_mape']
cols_to_drop = []
for col in train_predictions.columns:
if col not in target_columns:
cols_to_drop.append(col)
print('Dropping columns:', list(cols_to_drop))
train_predictions.drop(cols_to_drop, axis=1, inplace=True)
cols_to_drop = []
for col in test_predictions.columns:
if col not in target_columns:
cols_to_drop.append(col)
print('Dropping columns:', list(cols_to_drop))
test_predictions.drop(cols_to_drop, axis=1, inplace=True)
train_x = train_predictions.values
train_y = train_actuals[0].values
train_log_y = safe_log(train_y)
test_x = test_predictions.values
test_y = test_actuals[0].values
test_log_y = safe_log(test_y)
# Set use of log of y or y
if network['log_y']:
train_eval_y = train_log_y
test_eval_y = test_log_y
else:
train_eval_y = train_y
test_eval_y = test_y
if 'epochs' in network:
epochs = network['epochs']
else:
epochs = 5000
# Apply value scaling
scaler = MinMaxScaler(feature_range=(0,1))
train_x_scaled = scaler.fit_transform(train_x)
test_x_scaled = scaler.transform(test_x)
results = {
'mae': [],
'mape': [],
'maeape': [],
'epochs': [],
}
print('\rNetwork')
for property in network:
print(property, ':', network[property])
logging.info('%s: %s' % (property, network[property]))
num_folds = 1
for _ in range(num_folds):
# Clear all values
s = None
x_cv_train = None
y_cv_train = None
model = None
history = None
hist_epochs = None
# Delete weights file, if exists
try:
os.remove('weights.hdf5')
        except OSError:
pass
# Reorder array - get array index
s = np.arange(train_x_scaled.shape[0])
# Reshuffle index
np.random.shuffle(s)
# Create array using new index
x_cv_train = train_x_scaled[s]
y_cv_train = train_eval_y[s]
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, verbose=1, patience=2)
early_stopping = EarlyStopping(monitor='val_loss', patience=7)
csv_logger = CSVLogger('./logs/training.log')
checkpointer = ModelCheckpoint(filepath='weights.hdf5', verbose=0, save_best_only=True)
input_shape = train_x_scaled.shape[1]
model = compile_keras_model(network, input_shape)
# history = model.fit(train_x, train_y,
history = model.fit(x_cv_train, y_cv_train,
batch_size=network['batch_size'],
epochs=epochs,
verbose=0,
validation_split=0.2,
callbacks=[early_stopping, csv_logger, reduce_lr, checkpointer])
model.load_weights('weights.hdf5')
predictions = model.predict(test_x_scaled)
prediction_results = predictions.reshape(predictions.shape[0],)
# If using log of y, get exponent
if network['log_y']:
prediction_results = safe_exp(prediction_results)
mae = mean_absolute_error(test_y, prediction_results)
mape = safe_mape(test_y, prediction_results)
maeape = mae_mape(test_y, prediction_results)
hist_epochs = len(history.history['val_loss'])
results['mae'].append(mae)
results['mape'].append(mape)
results['maeape'].append(maeape)
results['epochs'].append(hist_epochs)
print('\rFold results')
print('epochs:', hist_epochs)
print('mae_mape:', maeape)
print('mape:', mape)
print('mae:', mae)
print('-' * 20)
overall_scores = {
'mae': np.mean(results['mae']),
'mape': np.mean(results['mape']),
'maeape': | np.mean(results['maeape']) | numpy.mean |
import os
import torch
import torch.nn as nn
import random
import numpy as np
from sklearn.metrics import f1_score
def set_random_state(seed_value):
random.seed(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| np.random.seed(seed_value) | numpy.random.seed |
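# Illustrative usage (assumed convention, not from the original module): call
# once at program start, before building models or dataloaders, so that
# python, numpy and torch all draw from the same fixed seed.
#
#   set_random_state(42)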
import numpy as np
import tqdm
from scipy import stats
from hmmpy.base import BaseHiddenMarkov
class SampleHMM(BaseHiddenMarkov):
"""
    Class to handle sampling from a hidden Markov model (HMM) with
    user-specified parameters.
Parameters
----------
n_states : int, default=2
Number of hidden states
hmm_params: dict, default=None
hmm model parameters to sample from.
To set params, create a dict with 'mu', 'std' and 'tpm' as keys
and their values in lists or numpy arrays.
random_state : int, default = 42
Parameter set to recreate output
Attributes
----------
mu : ndarray of shape (n_states,)
means to sample from
std : ndarray of shape (n_states,)
STDs to sample from
tpm : ndarray of shape (n_states, n_states)
Transition probability matrix between states
"""
def __init__(self, n_states=2, frequency='daily', hmm_params=None, random_state=42):
        # hmm params following Hardy (2001); daily is the default frequency
        if hmm_params is None and frequency == "monthly":
            hmm_params = {'mu': np.array([0.0123, -0.0157]),
                          'std': np.array([0.0347, 0.0778]),
                          'tpm': np.array([[0.9629, 0.0371],
                                           [0.2101, 0.7899]])
                          }
        elif hmm_params is None:
            # Convert from monthly time scale t=20 to daily t=1
            hmm_params = {'mu': np.array([0.0123, -0.0157]) / 20,
                          'std': np.array([0.0347, 0.0778]) / np.sqrt(20),
                          'tpm': np.array([[1-0.0021, 0.0021],  # TODO figure out powers of vectors in python
                                           [0.0120, 1-0.0120]])
                          }
self.type = 'sampler'
self.is_fitted = True
self.n_states = n_states
self.mu = np.array(hmm_params['mu'])
self.std = np.array(hmm_params['std'])
self.tpm = np.array(hmm_params['tpm'])
self.stationary_dist = super()._get_stationary_dist(self.tpm)
self.start_proba = self.stationary_dist
self.random_state = random_state
np.random.seed(self.random_state)
def sample(self, n_samples, n_sequences=1):
'''
Sample states from a fitted Hidden Markov Model.
Parameters
----------
n_samples : int
Amount of samples to generate
n_sequences : int, default=1
Number of independent sequences to sample from, e.g. if n_samples=100 and n_sequences=3
then 3 different sequences of length 100 are sampled
Returns
-------
samples : ndarray of shape (n_samples, n_sequences)
Outputs the generated samples of size n_samples
sample_states : ndarray of shape (n_samples, n_sequences)
Outputs sampled states
'''
mu = self.mu
std = self.std
tpm = self.tpm
stationary_dist = self.stationary_dist
state_index = np.arange(start=0, stop=self.n_states, step=1, dtype=np.int32) # Array of possible states
sample_states = np.zeros(shape=(n_samples, n_sequences), dtype=np.int32) # Init sample vector
samples = np.zeros(shape=(n_samples, n_sequences)) # Init sample vector
        print(f'Simulating {n_sequences} sequences of length {n_samples}')
for seq in tqdm.tqdm(range(n_sequences)):
sample_states[0, seq] = np.random.choice(a=state_index, size=1, p=stationary_dist)
for t in range(1, n_samples):
                # Each new state is chosen using the transition probs corresponding to the previous state.
sample_states[t, seq] = np.random.choice(a=state_index, size=1, p=tpm[sample_states[t - 1, seq], :])
samples[:, seq] = stats.norm.rvs(loc=mu[sample_states[:, seq]], scale=std[sample_states[:, seq]], size=n_samples)
if n_sequences == 1:
sample_states = sample_states[:, 0]
samples = samples[:, 0]
return samples, sample_states
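    # Illustrative usage (hedged sketch): with the default Hardy (2001)
    # daily parameters, a single return series can be drawn via
    #   sampler = SampleHMM(n_states=2, frequency='daily')
    #   returns, states = sampler.sample(n_samples=250)
    # where `returns` holds Gaussian draws conditioned on the sampled state
    # path in `states`.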
def sample_t(self, n_samples, n_sequences=1, dof=5):
'''
Sample states from a fitted Hidden Markov Model.
Parameters
----------
n_samples : int
Amount of samples to generate
n_sequences : int, default=1
Number of independent sequences to sample from, e.g. if n_samples=100 and n_sequences=3
then 3 different sequences of length 100 are sampled
dof : int, default=5
degrees of freedom in the conditional t-distributions.
Returns
-------
samples : ndarray of shape (n_samples, n_sequences)
Outputs the generated samples of size n_samples
sample_states : ndarray of shape (n_samples, n_sequences)
Outputs sampled states
'''
mu = self.mu
std = self.std
tpm = self.tpm
stationary_dist = self.stationary_dist
state_index = | np.arange(start=0, stop=self.n_states, step=1, dtype=np.int32) | numpy.arange |
import site # so that ai4water directory is in path
import unittest
import os
import sys
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import numpy as np
import pandas as pd
from ai4water.preprocessing.transformations import Transformation
from ai4water.tf_attributes import tf
from ai4water.datasets import busan_beach
if 230 <= int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 250:
from ai4water.functional import Model
print(f"Switching to functional API due to tensorflow version {tf.__version__}")
else:
from ai4water import Model
df = pd.DataFrame(np.concatenate([np.arange(1, 10).reshape(-1, 1), np.arange(1001, 1010).reshape(-1, 1)], axis=1),
columns=['data1', 'data2'])
def build_and_run(x_transformation, y_transformation,
data, inputs, outputs):
model = Model(model="RandomForestRegressor",
input_features=inputs,
output_features=outputs,
x_transformation=x_transformation,
y_transformation=y_transformation,
verbosity=0)
model.fit(data=data)
x, y = model.training_data(key='junk')
#pred, pred = model.inverse_transform(y, y, key='junk')
pred, index = model.dh_.deindexify(y, key='junk')
pred = pd.DataFrame(pred.reshape(len(pred), model.num_outs), columns=outputs, index=index).sort_index()
return pred
def run_method1(method,
cols=None,
data=None,
**kwargs):
normalized_df1, scaler = Transformation(method=method,
features=cols,
**kwargs)(data,
'fit_transform',
return_key=True)
denormalized_df1 = Transformation(features=cols,
)(normalized_df1,
'inverse',
scaler=scaler['scaler'])
return normalized_df1, denormalized_df1
def run_method2(method,
data=None,
index=None,
**kwargs):
if index:
data.index = pd.date_range("20110101", periods=len(data), freq="D")
scaler = Transformation(method=method,
**kwargs)
normalized_df, scaler_dict = scaler.fit_transform(data, return_key=True)
denormalized_df = scaler.inverse_transform(data=normalized_df, key=scaler_dict['key'])
return data, normalized_df, denormalized_df
def run_method3(method,
data=None,
index=None,
**kwargs):
if index:
data.index = pd.date_range("20110101", periods=len(data), freq="D")
scaler = Transformation(method=method,
**kwargs)
normalized_df3, scaler_dict = scaler(data,
return_key=True)
denormalized_df3 = scaler(what='inverse', data=normalized_df3, key=scaler_dict['key'])
return data, normalized_df3, denormalized_df3
def run_method4(method,data=None, **kwargs):
scaler = Transformation(**kwargs)
normalized_df4, scaler_dict = getattr(scaler, "fit_transform_with_" + method)(
data=data,
return_key=True)
denormalized_df4 = getattr(scaler, "inverse_transform_with_" + method)(data=normalized_df4, key=scaler_dict['key'])
return normalized_df4, denormalized_df4
def run_log_methods(method="log", index=None, insert_nans=True, insert_zeros=False, assert_equality=True,
insert_ones=False):
a = np.random.random((10, 4))
a[0, 0] = np.nan
a[0, 1] = 1.
if insert_nans or insert_zeros:
a[2:4, 1] = np.nan
a[3:5, 2:3] = np.nan
if insert_zeros:
a[5:8, 3] = 0.0
if insert_ones:
a[6, 1] = 1.0
a[9, 2:3] = 1.0
cols = ['data1', 'data2', 'data3', 'data4']
if index is not None:
index = pd.date_range("20110101", periods=len(a), freq="D")
df3 = pd.DataFrame(a, columns=cols, index=index)
_, _ = run_method1(method=method, data=df3.copy())
_, _, dfo2 = run_method2(method=method, data=df3.copy())
_, _, dfo3 = run_method3(method=method, data=df3.copy())
_, dfo4 = run_method4(method=method, data=df3.copy())
if assert_equality:
#assert np.allclose(df3, dfo1, equal_nan=True)
assert | np.allclose(df3, dfo2, equal_nan=True) | numpy.allclose |
# coding: utf-8
# In[94]:
import numpy as np
def randPair(s,e):
return np.random.randint(s,e), np.random.randint(s,e)
#finds an array in the "depth" dimension of the grid
def findLoc(state, obj):
for i in range(0,4):
for j in range(0,4):
if (state[i,j] == obj).all():
return i,j
#Initialize stationary grid, all items are placed deterministically
def initGrid():
state = np.zeros((4,4,5))
#place player 1
state[0,1] = np.array([0,0,0,1,0])
#place wall
state[2,2] = np.array([0,0,1,0,0])
#place pit
state[1,1] = np.array([0,1,0,0,0])
#place goal
state[3,3] = np.array([1,0,0,0,0])
#place player 2
state[randPair(0,4)] = np.array([0,0,0,0,1])
a1 = findLoc(state, np.array([0,0,0,1,0])) #find grid position of player1 (agent)
a2 = findLoc(state, np.array([0,0,0,0,1])) #find grid position of player2
w = findLoc(state, np.array([0,0,1,0,0])) #find wall
g = findLoc(state, np.array([1,0,0,0,0])) #find goal
p = findLoc(state, np.array([0,1,0,0,0])) #find pit
if (not a1 or not a2 or not w or not g or not p):
#print('Invalid grid. Rebuilding..')
return initGrid()
return state
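# Encoding note: every grid cell holds a depth-5 one-hot vector whose channels
# are [goal, pit, wall, player1, player2]; e.g. np.array([0,0,0,1,0]) marks
# player 1. findLoc/getLoc search along this depth dimension.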
# In[95]:
def makeMove(state, action,player2_terminated):
#need to locate player in grid
#need to determine what object (if any) is in the new grid spot the player is moving to
player1_loc = getLoc(state, 3)
if not player2_terminated:
player2_loc = getLoc(state, 4)
wall = findLoc(state, np.array([0,0,1,0,0]))
goal = findLoc(state, np.array([1,0,0,0,0]))
pit = findLoc(state, np.array([0,1,0,0,0]))
state = np.zeros((4,4,5))
#print player1_loc
#print player2_loc
#up (row - 1)
if action==0:
new_loc = (player1_loc[0] - 1, player1_loc[1])
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
state[new_loc][3] = 1
#down (row + 1)
elif action==1:
new_loc = (player1_loc[0] + 1, player1_loc[1])
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
state[new_loc][3] = 1
#left (column - 1)
elif action==2:
new_loc = (player1_loc[0], player1_loc[1] - 1)
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
state[new_loc][3] = 1
#right (column + 1)
elif action==3:
new_loc = (player1_loc[0], player1_loc[1] + 1)
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
state[new_loc][3] = 1
new_player1_loc = getLoc(state, 3)
#print new_player1_loc
if (not new_player1_loc):
state[player1_loc] = np.array([0,0,0,1,0])
#re-place pit
state[pit][1] = 1
#re-place wall
state[wall][2] = 1
#re-place goal
state[goal][0] = 1
if not player2_terminated:
#re-place player 2
state[player2_loc][4] = 1
return state
# In[96]:
def makeMovePlayer2(state, player2_terminated, testing_mode, input_action = 0):
#need to locate player in grid
#need to determine what object (if any) is in the new grid spot the player is moving to
player1_loc = getLoc(state, 3)
player2_loc = getLoc(state, 4)
wall = findLoc(state, np.array([0,0,1,0,0]))
goal = findLoc(state, np.array([1,0,0,0,0]))
pit = findLoc(state, np.array([0,1,0,0,0]))
state = np.zeros((4,4,5))
if testing_mode:
#print player2_loc
        action = input("Enter 0 for up, 1 for down, 2 for left, 3 for right ")
elif not testing_mode:
action = str(input_action)
#up (row - 1)
if action==str(0):
new_loc = (player2_loc[0] - 1, player2_loc[1])
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
if new_loc != goal and new_loc != pit:
state[new_loc][4] = 1
elif new_loc == goal:
state[new_loc] = np.array([1,0,0,0,1])
elif new_loc == pit:
state[new_loc] = np.array([0,1,0,0,1])
#down (row + 1)
elif action==str(1):
new_loc = (player2_loc[0] + 1, player2_loc[1])
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
if new_loc != goal and new_loc != pit:
state[new_loc][4] = 1
elif new_loc == goal:
state[new_loc] = np.array([1,0,0,0,1])
elif new_loc == pit:
state[new_loc] = np.array([0,1,0,0,1])
#left (column - 1)
elif action==str(2):
new_loc = (player2_loc[0], player2_loc[1] - 1)
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
if new_loc != goal and new_loc != pit:
state[new_loc][4] = 1
elif new_loc == goal:
state[new_loc] = np.array([1,0,0,0,1])
elif new_loc == pit:
state[new_loc] = np.array([0,1,0,0,1])
#right (column + 1)
elif action==str(3):
new_loc = (player2_loc[0], player2_loc[1] + 1)
if (new_loc != wall):
if ((np.array(new_loc) <= (3,3)).all() and (np.array(new_loc) >= (0,0)).all()):
if new_loc != goal and new_loc != pit:
state[new_loc][4] = 1
elif new_loc == goal:
state[new_loc] = np.array([1,0,0,0,1])
elif new_loc == pit:
state[new_loc] = np.array([0,1,0,0,1])
new_player2_loc = getLoc(state, 4)
if (not new_player2_loc):
state[player2_loc] = np.array([0,0,0,0,1])
#re-place pit
state[pit][1] = 1
#re-place wall
state[wall][2] = 1
#re-place goal
state[goal][0] = 1
#re-place player 1
state[player1_loc][3] = 1
return state
# In[97]:
def getLoc(state, level):
for i in range(0,4):
for j in range(0,4):
if (state[i,j][level] == 1):
return i,j
def getReward(state):
player_loc = getLoc(state, 3)
pit = getLoc(state, 1)
goal = getLoc(state, 0)
if (player_loc == pit):
return -10
elif (player_loc == goal):
return 10
else:
return -1
def getRewardPlayer2(state):
player2_loc = getLoc(state, 4)
pit = getLoc(state, 1)
goal = getLoc(state, 0)
if (player2_loc == pit):
return -10
elif (player2_loc == goal):
return 10
else:
return -1
def dispGrid(state,player2_terminated):
grid = np.zeros((4,4), dtype='<U2')
player1_loc = getLoc(state, 3)
player2_loc = getLoc(state, 4)
wall = findLoc(state, np.array([0,0,1,0,0]))
goal = findLoc(state, np.array([1,0,0,0,0]))
pit = findLoc(state, np.array([0,1,0,0,0]))
for i in range(0,4):
for j in range(0,4):
grid[i,j] = ' '
if player1_loc:
grid[player1_loc] = 'P1' #player1
if player2_loc:
if not player2_terminated:
grid[player2_loc] = 'P2' #player2
if player1_loc == player2_loc and not player2_terminated:
grid[player1_loc] = 'PB' #player1 and player2
if wall:
grid[wall] = 'W' #wall
if goal:
grid[goal] = '+' #goal
if pit:
grid[pit] = '-' #pit
return grid
# In[98]:
state = initGrid()
dispGrid(state, True)
# In[99]:
state = makeMove(state, 0,True)
print('Reward: %s' % (getReward(state),))
dispGrid(state,True)
# In[100]:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.models import model_from_json
import os
# In[101]:
model = Sequential()
model.add(Dense(164, init='lecun_uniform', input_shape=(80,)))
model.add(Activation('relu'))
#model.add(Dropout(0.2)) I'm not using dropout, but maybe you wanna give it a try?
model.add(Dense(150, init='lecun_uniform'))
model.add(Activation('relu'))
#model.add(Dropout(0.2))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) #linear output so we can have range of real-valued outputs
rms = RMSprop()
model.compile(loss='mse', optimizer=rms)
# In[102]:
model.predict(state.reshape(1,80), batch_size=1)
#just to show an example output; read outputs left to right: up/down/left/right
# In[103]:
from IPython.display import clear_output
import random
player2_terminated = False
epochs = 1000
gamma = 0.9 #since it may take several moves to goal, making gamma high
epsilon = 1
for i in range(epochs):
state = initGrid()
status = 1
player2_terminated = False
#while game still in progress
while(status == 1):
#We are in state S
#Let's run our Q function on S to get Q values for all possible actions
qval = model.predict(state.reshape(1,80), batch_size=1)
if (random.random() < epsilon): #choose random action
action = | np.random.randint(0,4) | numpy.random.randint |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 8 17:15:16 2020
@author: Pavan
"""
import numpy as np
def get_valid_index(strategy_weights,delay=1):
valid_index = ~np.isnan(strategy_weights).all(axis=1)
valid_index[-1*delay]=False
return valid_index
def bets_to_pnl(starting_value,strategy_weights,clean_values,base_data,long_lev, short_lev, ADV_threshold_percentage,costs_threshold_bps,commissions_bps):
cleaned_weights = strategy_weights[clean_values]
O = (base_data[0])[clean_values]
C = (base_data[3])[clean_values]
Liq = (base_data[5])[clean_values]
H = (base_data[1])[clean_values]
L = (base_data[2])[clean_values]
dollars_at_open = np.empty(cleaned_weights.shape)
dollars_at_open_calc = | np.empty(cleaned_weights.shape) | numpy.empty |
"""
Module containing classes for ray tracing through the ice.
Ray tracer classes correspond to ray trace path classes, where the ray
tracer is responsible for calculating the existence and launch angle of
paths between points, and the ray tracer path objects are responsible for
returning information about propagation along their respective path.
"""
import logging
import numpy as np
import scipy.constants
import scipy.fft
import scipy.optimize
from pyrex.internal_functions import normalize, LazyMutableClass, lazy_property
from pyrex.ice_model import AntarcticIce, UniformIce, ice
logger = logging.getLogger(__name__)
class BasicRayTracePath(LazyMutableClass):
"""
Class for representing a single ray-trace solution between points.
Stores parameters of the ray path with calculations performed by
integrating z-steps of size ``dz``. Most properties are lazily evaluated
to save on computation time. If any attributes of the class instance are
changed, the lazily-evaluated properties will be cleared.
Parameters
----------
parent_tracer : BasicRayTracer
Ray tracer for which this path is a solution.
launch_angle : float
Launch angle (radians) of the ray path.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
theta0 : float
The launch angle of the ray path at `from_point`.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
emitted_direction
received_direction
path_length
tof
coordinates
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
BasicRayTracer : Class for calculating the ray-trace solutions between
points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
"""
def __init__(self, parent_tracer, launch_angle, direct):
self.from_point = parent_tracer.from_point
self.to_point = parent_tracer.to_point
self.theta0 = launch_angle
self.ice = parent_tracer.ice
self.dz = parent_tracer.dz
self.direct = direct
super().__init__()
@property
def _metadata(self):
"""Metadata dictionary for writing `BasicRayTracePath` information."""
return {
"n0": self.n0,
"dz": self.dz,
"emitted_x": self.emitted_direction[0],
"emitted_y": self.emitted_direction[1],
"emitted_z": self.emitted_direction[2],
"received_x": self.received_direction[0],
"received_y": self.received_direction[1],
"received_z": self.received_direction[2],
"launch_angle": np.arccos(self.emitted_direction[2]),
"receiving_angle": np.pi-np.arccos(self.received_direction[2]),
"path_length": self.path_length,
"tof": self.tof
}
@property
def z_turn_proximity(self):
"""
Parameter for how closely path approaches z_turn.
Necessary to avoid diverging integrals which occur at z_turn.
"""
# Best value of dz/10 determined empirically by checking errors
return self.dz/10
@property
def z0(self):
"""Depth (m) of the launching point."""
return self.from_point[2]
@property
def z1(self):
"""Depth (m) of the receiving point."""
return self.to_point[2]
@lazy_property
def n0(self):
"""Index of refraction of the ice at the launching point."""
return self.ice.index(self.z0)
@lazy_property
def rho(self):
"""Radial distance (m) between the endpoints."""
u = self.to_point - self.from_point
return np.sqrt(u[0]**2 + u[1]**2)
@lazy_property
def phi(self):
"""Azimuthal angle (radians) between the endpoints."""
u = self.to_point - self.from_point
return np.arctan2(u[1], u[0])
@lazy_property
def beta(self):
"""Launching beta parameter (n(z0) * sin(theta0))."""
return self.n0 * np.sin(self.theta0)
@lazy_property
def z_turn(self):
"""Turning depth (m) of the path."""
return self.ice.depth_with_index(self.beta)
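    # Physical note: z_turn is the depth at which the index of refraction
    # matches the Snell invariant, n(z_turn) == beta == n(z0)*sin(theta0),
    # so theta(z) -> 90 degrees and the ray runs horizontal before turning
    # over.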
# @property
# def exists(self):
# """Boolean of whether the path between the points with the
# given launch angle exists."""
# return True
@lazy_property
def emitted_direction(self):
"""Direction in which ray is emitted."""
return np.array([np.sin(self.theta0) * np.cos(self.phi),
np.sin(self.theta0) * np.sin(self.phi),
np.cos(self.theta0)])
@lazy_property
def received_direction(self):
"""Direction ray is travelling when it is received."""
if self.direct:
sign = np.sign(np.cos(self.theta0))
return np.array([np.sin(self.theta(self.z1)) * np.cos(self.phi),
np.sin(self.theta(self.z1)) * np.sin(self.phi),
sign*np.cos(self.theta(self.z1))])
else:
return np.array([np.sin(self.theta(self.z1)) * np.cos(self.phi),
np.sin(self.theta(self.z1)) * np.sin(self.phi),
-np.cos(self.theta(self.z1))])
def theta(self, z):
"""
Polar angle of the ray at the given depths.
Calculates the polar angle of the ray's direction at the given depth
in the ice. Note that the ray could be travelling upward or downward
at this polar angle.
Parameters
----------
z : array_like
(Negative-valued) depths (m) in the ice.
Returns
-------
array_like
Polar angle at the given values of `z`.
"""
return np.arcsin(np.sin(self.theta0) * self.n0/self.ice.index(z))
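    # Note the Snell invariant underlying `theta`: n(z)*sin(theta(z)) is
    # constant along the path and equal to `beta`. For example (sketch):
    #   path.beta == path.ice.index(z) * np.sin(path.theta(z))
    # holds (up to floating-point error) for any depth z above z_turn.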
# Log-scaled zs (commented out below and in z_integral method) seemed
# like a good idea for reducing dimensionality, but didn't work out.
# Kept here in case it works out better in the future
# @lazy_property
# def dn(self):
# return np.abs(self.ice.gradient(-10)[2])*self.dz
# def _log_scale_zs(self, z0, z1):
# # Base dn on dz at 10 meter depth
# n0 = self.ice.index(z0)
# n1 = self.ice.index(z1)
# n_steps = int(np.abs(n1-n0)/self.dn)
# ns = np.linspace(n0, n1, n_steps+2)
# return self.ice.depth_with_index(ns)
def z_integral(self, integrand):
"""
Calculate the numerical integral of the given integrand.
For the integrand as a function of z, the numerical integral is
calculated along the ray path.
Parameters
----------
integrand : function
Function returning the values of the integrand at a given array of
values for the depth z.
Returns
-------
float
The value of the numerical integral along the ray path.
"""
if self.direct:
n_zs = int(np.abs(self.z1-self.z0)/self.dz)
zs, dz = np.linspace(self.z0, self.z1, n_zs+1, retstep=True)
return np.trapz(integrand(zs), dx=np.abs(dz), axis=0)
# zs = self._log_scale_zs(self.z0, self.z1)
# return np.trapz(integrand(zs), x=zs, axis=0)
else:
n_zs_1 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z0)/self.dz)
zs_1, dz_1 = np.linspace(self.z0, self.z_turn-self.z_turn_proximity,
n_zs_1+1, retstep=True)
n_zs_2 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z1)/self.dz)
zs_2, dz_2 = np.linspace(self.z_turn-self.z_turn_proximity, self.z1,
n_zs_2+1, retstep=True)
return (np.trapz(integrand(zs_1), dx=np.abs(dz_1), axis=0) +
np.trapz(integrand(zs_2), dx=np.abs(dz_2), axis=0))
# zs_1 = self._log_scale_zs(self.z0, self.z_turn-self.z_turn_proximity)
# zs_2 = self._log_scale_zs(self.z1, self.z_turn-self.z_turn_proximity)
# return (np.trapz(integrand(zs_1), x=zs_1, axis=0) +
# np.trapz(integrand(zs_2), x=zs_2, axis=0))
@lazy_property
def path_length(self):
"""Length (m) of the ray path."""
return self.z_integral(lambda z: 1/np.cos(self.theta(z)))
@lazy_property
def tof(self):
"""Time of flight (s) along the ray path."""
return self.z_integral(lambda z: self.ice.index(z) / scipy.constants.c
/ np.cos(self.theta(z)))
@lazy_property
def fresnel(self):
"""
Fresnel factors for reflection off the ice surface.
        The Fresnel reflectances are calculated as amplitude ratios (the
        square root of the power ratios) for reflection off the ice surface,
        and are 1 if the path doesn't reach the surface. Stores the s and p
        polarized reflectances, respectively.
"""
if self.direct or self.z_turn<self.ice.valid_range[1]:
return 1, 1
else:
n_1 = self.ice.index(self.ice.valid_range[1])
n_2 = self.ice.index_above
theta_1 = self.theta(self.ice.valid_range[1])
cos_1 = np.cos(theta_1)
sin_2 = n_1/n_2*np.sin(theta_1)
if sin_2<=1:
# Plain reflection with real coefficients
cos_2 = np.sqrt(1 - (sin_2)**2)
else:
# Total internal reflection off the surface, results in complex
# fresnel factors encoding the phase data
cos_2 = np.sqrt((sin_2)**2 - 1)*1j
# TODO: Confirm sign convention here
r_s = (n_1*cos_1 - n_2*cos_2) / (n_1*cos_1 + n_2*cos_2)
r_p = (n_2*cos_1 - n_1*cos_2) / (n_2*cos_1 + n_1*cos_2)
return r_s, r_p
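    # For example (sketch): a path turning over below the surface returns
    # (1, 1); for total internal reflection (sin_2 > 1) the returned r_s and
    # r_p are complex with unit magnitude, encoding only a phase shift.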
def attenuation(self, f):
"""
Calculate the attenuation factor for signal frequencies.
Calculates the attenuation factor to be multiplied by the signal
amplitude at the given frequencies.
Parameters
----------
f : array_like
Frequencies (Hz) at which to calculate signal attenuation.
Returns
-------
array_like
Attenuation factors for the signal at the frequencies `f`.
"""
fa = np.abs(f)
def integrand(z):
partial_integrand = 1 / np.cos(self.theta(z))
alen = self.ice.attenuation_length(z, fa)
return (partial_integrand / alen.T).T
return np.exp(-np.abs(self.z_integral(integrand)))
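    # For example (sketch, with arbitrary frequencies):
    #   path.attenuation(np.array([100e6, 1e9]))
    # returns one factor per frequency in (0, 1]; the np.exp(-|integral|)
    # form guarantees the factors never exceed 1.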
def propagate(self, signal=None, polarization=None,
attenuation_interpolation=None):
"""
Propagate the signal with optional polarization along the ray path.
Applies the frequency-dependent signal attenuation along the ray path
and shifts the times according to the ray time of flight. Additionally
provides the s and p polarization directions.
Parameters
----------
signal : Signal, optional
``Signal`` object to propagate.
polarization : array_like, optional
Vector representing the linear polarization of the `signal`.
attenuation_interpolation: float, optional
Logarithmic (base 10) interpolation step to be used for
interpolating attenuation along the ray path. If `None`, no
interpolation is applied and the attenuation is pre-calculated at
the expected signal frequencies.
Returns
-------
tuple of Signal
Tuple of ``Signal`` objects representing the s and p polarizations
of the original `signal` attenuated along the ray path. Only
returned if `signal` was not ``None``.
tuple of ndarray
Tuple of polarization vectors representing the s and p polarization
directions of the `signal` at the end of the ray path. Only
returned if `polarization` was not ``None``.
See Also
--------
pyrex.Signal : Base class for time-domain signals.
"""
if polarization is None:
if signal is None:
return
else:
new_signal = signal.copy()
new_signal.shift(self.tof)
# Pre-calculate attenuation at the designated frequencies to
# save on heavy computation time of the attenuation method
freqs = scipy.fft.fftfreq(2*len(signal.times), d=signal.dt)
if attenuation_interpolation is None:
freqs.sort()
else:
logf_min = np.log10(np.min(freqs[freqs>0]))
logf_max = np.log10(np.max(freqs))
n_steps = int((logf_max - logf_min)
/ attenuation_interpolation)
if (logf_max-logf_min)%attenuation_interpolation:
n_steps += 1
logf = np.logspace(logf_min, logf_max, n_steps+1)
freqs = np.concatenate((-np.flipud(logf), [0], logf))
atten_vals = self.attenuation(freqs)
attenuation = lambda f: np.interp(f, freqs, atten_vals)
new_signal.filter_frequencies(attenuation)
return new_signal
else:
# Unit vectors perpendicular and parallel to plane of incidence
# at the launching point
u_s0 = normalize(np.cross(self.emitted_direction, [0, 0, 1]))
u_p0 = normalize(np.cross(u_s0, self.emitted_direction))
# Unit vector parallel to plane of incidence at the receiving point
# (perpendicular vector stays the same)
u_p1 = normalize(np.cross(u_s0, self.received_direction))
if signal is None:
return (u_s0, u_p1)
else:
# Amplitudes of s and p components
pol_s = np.dot(polarization, u_s0)
pol_p = np.dot(polarization, u_p0)
# Fresnel reflectances of s and p components
r_s, r_p = self.fresnel
# Pre-calculate attenuation at the designated frequencies to
# save on heavy computation time of the attenuation method
freqs = scipy.fft.fftfreq(2*len(signal.times), d=signal.dt)
if attenuation_interpolation is None:
freqs.sort()
else:
logf_min = np.log10(np.min(freqs[freqs>0]))
logf_max = np.log10(np.max(freqs))
n_steps = int((logf_max - logf_min)
/ attenuation_interpolation)
if (logf_max-logf_min)%attenuation_interpolation:
n_steps += 1
logf = np.logspace(logf_min, logf_max, n_steps+1)
freqs = np.concatenate((-np.flipud(logf), [0], logf))
atten_vals = self.attenuation(freqs)
# Apply fresnel s and p coefficients in addition to attenuation
attenuation_s = lambda f: np.interp(f, freqs, atten_vals) * r_s
attenuation_p = lambda f: np.interp(f, freqs, atten_vals) * r_p
signal_s = signal * pol_s
signal_p = signal * pol_p
signal_s.shift(self.tof)
signal_p.shift(self.tof)
signal_s.filter_frequencies(attenuation_s, force_real=True)
signal_p.filter_frequencies(attenuation_p, force_real=True)
return (signal_s, signal_p), (u_s0, u_p1)
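    # Usage sketch (the signal and polarization values are assumptions):
    #   (sig_s, sig_p), (u_s, u_p) = path.propagate(signal=my_signal,
    #                                               polarization=[0, 0, 1])
    # sig_s and sig_p carry the attenuated, time-shifted s and p components,
    # while u_s and u_p give their polarization directions at the receiver.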
@lazy_property
def coordinates(self):
"""
x, y, and z-coordinates along the path (using dz step).
Coordinates are provided for plotting purposes only, and are not vetted
for use in calculations.
"""
if self.direct:
n_zs = int(np.abs(self.z1-self.z0)/self.dz)
zs, dz = np.linspace(self.z0, self.z1, n_zs+1, retstep=True)
integrand = np.tan(self.theta(zs))
rs = np.zeros(len(integrand))
trap_areas = (integrand[:-1] + np.diff(integrand)/2) * dz
rs[1:] += np.abs(np.cumsum(trap_areas))
else:
n_zs_1 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z0) /
self.dz)
zs_1, dz_1 = np.linspace(self.z0, self.z_turn-self.z_turn_proximity,
n_zs_1+1, retstep=True)
integrand_1 = np.tan(self.theta(zs_1))
n_zs_2 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z1) /
self.dz)
zs_2, dz_2 = np.linspace(self.z_turn-self.z_turn_proximity, self.z1,
n_zs_2+1, retstep=True)
integrand_2 = np.tan(self.theta(zs_2))
rs_1 = np.zeros(len(integrand_1))
trap_areas = ((integrand_1[:-1] + np.diff(integrand_1)/2) *
np.abs(dz_1))
rs_1[1:] += np.cumsum(trap_areas)
rs_2 = np.zeros(len(integrand_2)) + rs_1[-1]
trap_areas = ((integrand_2[:-1] + np.diff(integrand_2)/2) *
np.abs(dz_2))
rs_2[1:] += np.cumsum(trap_areas)
rs = np.concatenate((rs_1, rs_2[1:]))
zs = np.concatenate((zs_1, zs_2[1:]))
xs = self.from_point[0] + rs*np.cos(self.phi)
ys = self.from_point[1] + rs*np.sin(self.phi)
return xs, ys, zs
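# Illustrative sketch, not part of the library API: the endpoints, the 1 m
# z-step, and reliance on the module-level default `ice` model are
# assumptions for demonstration only.
def _demo_basic_ray_trace(from_point=(0, 0, -1000), to_point=(500, 0, -200)):
    tracer = BasicRayTracer(from_point, to_point, dz=1)
    for path in tracer.solutions:
        # Lazy properties are computed on first access and then cached
        print("direct" if path.direct else "indirect",
              "length = %.1f m" % path.path_length,
              "tof = %.1f ns" % (path.tof * 1e9))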
class SpecializedRayTracePath(BasicRayTracePath):
"""
Class for representing a single ray-trace solution between points.
Stores parameters of the ray path with calculations performed analytically
(with the exception of attenuation). These calculations require the index
of refraction of the ice to be of the form n(z)=n0-k*exp(a*z). However this
restriction allows for most of the integrations to be performed
analytically. The attenuation is the only attribute which is still
calculated by numerical integration with z-steps of size ``dz``. Most
properties are lazily evaluated to save on computation time. If any
attributes of the class instance are changed, the lazily-evaluated
properties will be cleared.
Parameters
----------
parent_tracer : SpecializedRayTracer
Ray tracer for which this path is a solution.
launch_angle : float
Launch angle (radians) of the ray path.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
theta0 : float
The launch angle of the ray path at `from_point`.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
uniformity_factor : float
Factor (<1) of the base index of refraction (n0 in the ice model)
beyond which calculations start to break down numerically.
beta_tolerance : float
``beta`` value (near 0) below which calculations start to break down
numerically.
emitted_direction
received_direction
path_length
tof
coordinates
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
SpecializedRayTracer : Class for calculating the ray-trace solutions
between points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
The requirement that the ice model go as n(z)=n0-k*exp(a*z) is implemented
    by requiring the ice model to inherit from `AntarcticIce`. This check is
    not fool-proof, but the ray tracing will likely fail in an obvious way if
    the index follows a very different functional form.
"""
# Factor of index of refraction at which calculations may break down
uniformity_factor = 0.99999
# Beta value below which calculations may break down
beta_tolerance = 0.005
@lazy_property
def valid_ice_model(self):
"""Whether the ice model being used supports this specialization."""
return ((isinstance(self.ice, type) and
issubclass(self.ice, AntarcticIce))
or isinstance(self.ice, AntarcticIce))
@lazy_property
def z_uniform(self):
"""
Depth (m) beyond which the ice should be treated as uniform.
Calculated based on the ``uniformity_factor``. Necessary due to
numerical rounding issues at indices close to the index limit.
"""
return self.ice.depth_with_index(self.ice.n0 * self.uniformity_factor)
@staticmethod
def _z_int_uniform_correction(z0, z1, z_uniform, beta, ice, integrand,
integrand_kwargs={}, numerical=False, dz=None,
derivative_special_case=False):
"""
Function to perform a z-integration with a uniform ice correction.
Can be an analytic or numerical integration. Takes into account the
effect of treating the ice as uniform beyond some depth.
Parameters
----------
z0 : float
(Negative-valued) depth (m) of the left limit of the integral.
z1 : float
(Negative-valued) depth (m) of the right limit of the integral.
z_uniform : float
(Negative-valued) depth (m) below which the ice is assumed to have
a uniform index.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
integrand : function
Function returning the values of the integrand at a given array of
values for the depth z.
integrand_kwargs : dict, optional
A dictionary of keyword arguments to be passed into the `integrand`
function.
numerical : boolean, optional
Whether to use the numerical integral instead of an analytic one.
If ``False`` the analytic integral is calculated. If ``True`` the
numerical integral is calculated.
dz : float, optional
The z-step to use for numerical integration. Only needed when
`numerical` is ``True``.
derivative_special_case : boolean, optional
Boolean controlling whether the special case of doing the distance
integral beta derivative should be used.
Returns
-------
Integral of the given `integrand` along the path from `z0` to `z1`.
"""
# Suppress numpy RuntimeWarnings
with np.errstate(divide='ignore', invalid='ignore'):
if numerical:
if dz is None:
raise ValueError("Argument dz must be specified for "+
"numerical integrals")
if (z0<z_uniform)==(z1<z_uniform):
# z0 and z1 on same side of z_uniform
n_zs = int(np.abs(z1-z0)/dz)
if n_zs<10:
n_zs = 10
zs = np.linspace(z0, z1, n_zs+1)
return integrand(zs, beta=beta, ice=ice, deep=z0<z_uniform,
**integrand_kwargs)
else:
n_zs_1 = int(np.abs(z_uniform-z0)/dz)
if n_zs_1<10:
n_zs_1 = 10
zs_1 = np.linspace(z0, z_uniform, n_zs_1+1)
n_zs_2 = int(np.abs(z1-z_uniform)/dz)
if n_zs_2<10:
n_zs_2 = 10
zs_2 = np.linspace(z_uniform, z1, n_zs_2+1)
return (integrand(zs_1, beta=beta, ice=ice,
deep=z0<z_uniform,
**integrand_kwargs) +
integrand(zs_2, beta=beta, ice=ice,
deep=z1<z_uniform,
**integrand_kwargs))
# Analytic integrals
int_z0 = integrand(z0, beta, ice, deep=z0<z_uniform,
**integrand_kwargs)
int_z1 = integrand(z1, beta, ice, deep=z1<z_uniform,
**integrand_kwargs)
if not derivative_special_case:
if (z0<z_uniform)==(z1<z_uniform):
# z0 and z1 on same side of z_uniform
return int_z1 - int_z0
else:
int_diff = (
integrand(z_uniform, beta, ice, deep=True,
**integrand_kwargs) -
integrand(z_uniform, beta, ice, deep=False,
**integrand_kwargs)
)
if z0<z1:
# z0 below z_uniform, z1 above z_uniform
return int_z1 - int_z0 + int_diff
else:
# z0 above z_uniform, z1 below z_uniform
return int_z1 - int_z0 - int_diff
else:
# Deal with special case of doing distance integral beta derivative
# which includes two bounds instead of just giving indef. integral
# FIXME: Somewhat inaccurate, should probably be done differently
z_turn = np.log((ice.n0-beta)/ice.k)/ice.a
if (z0<z_uniform)==(z1<z_uniform)==(z_turn<z_uniform):
# All on same side of z_uniform
return int_z0 + int_z1
else:
int_diff = (
integrand(z_uniform, beta, ice, deep=True,
**integrand_kwargs) -
integrand(z_uniform, beta, ice, deep=False,
**integrand_kwargs)
)
if (z0<z_uniform)==(z1<z_uniform):
# z0 and z1 below z_uniform, but z_turn above
return int_z0 + int_z1 - 2*int_diff
else:
# z0 or z1 below z_uniform, others above
return int_z0 + int_z1 - int_diff
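    # Note on the stitching above: `int_diff` is the mismatch between the
    # deep (uniform-ice) and exponential-profile antiderivatives evaluated at
    # z_uniform; adding or subtracting it once per boundary crossing keeps
    # the piecewise definite integral continuous across z_uniform.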
def z_integral(self, integrand, integrand_kwargs={}, numerical=False):
"""
Calculate the integral of the given integrand.
For the integrand as a function of z, the analytic or numerical
integral is calculated along the ray path.
Parameters
----------
integrand : function
Function returning the values of the integrand at a given array of
values for the depth z.
integrand_kwargs : dict, optional
A dictionary of keyword arguments to be passed into the `integrand`
function.
numerical : boolean, optional
Whether to use the numerical integral instead of an analytic one.
If ``False`` the analytic integral is calculated. If ``True`` the
numerical integral is calculated.
Returns
-------
float
The value of the integral along the ray path.
Raises
------
TypeError
If the ice model is not valid for the specialized analytic
integrations.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must inherit methods from "+
"pyrex.AntarcticIce")
if self.direct:
return self._z_int_uniform_correction(self.z0, self.z1,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
else:
int_1 = self._z_int_uniform_correction(self.z0, self.z_turn,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
int_2 = self._z_int_uniform_correction(self.z1, self.z_turn,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
return int_1 + int_2
@staticmethod
def _int_terms(z, beta, ice):
"""
Useful pre-calculated substitutions for integrations.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
Returns
-------
alpha : float
``n0``^2 - `beta`^2
n_z : float
Index at depth `z`.
gamma : float
`n_z`^2 - `beta`^2
log_term_1 : float
``n0``*`n_z` - `beta`^2 - sqrt(`alpha`*`gamma`)
log_term_2 : float
`n_z` + sqrt(`gamma`)
"""
alpha = ice.n0**2 - beta**2
n_z = ice.n0 - ice.k*np.exp(ice.a*z)
gamma = n_z**2 - beta**2
        # Clip gamma to zero when it is a very small negative number caused by
        # numerical rounding. This could mask cases where a genuinely negative
        # gamma should have produced nans but now yields finite values; that
        # situation appears to arise only when the launch angle exceeds the
        # maximum allowed by the ray tracer, so it is likely alright. If
        # problems arise, replace with gamma<0 and np.isclose(gamma, 0) or
        # similar.
gamma = np.where(gamma<0, 0, gamma)
log_term_1 = ice.n0*n_z - beta**2 - np.sqrt(alpha*gamma)
        log_term_2 = n_z + np.sqrt(gamma)
        return alpha, n_z, gamma, log_term_1, log_term_2
@classmethod
def _distance_integral(cls, z, beta, ice, deep=False):
"""
Indefinite z-integral for calculating radial distance.
Calculates the indefinite z-integral of tan(arcsin(beta/n(z))), which
between two z values gives the radial distance of the direct path
between the z values.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
if deep:
return beta * z / np.sqrt(alpha)
else:
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
0,
beta / np.sqrt(alpha) * (-z + np.log(log_1)/ice.a))
@classmethod
def _distance_integral_derivative(cls, z, beta, ice, deep=False):
"""
Beta derivative of indefinite z-integral for radial distance.
Calculates the beta derivative of the indefinite z-integral of
tan(arcsin(beta/n(z))), which is used for finding the maximum distance
integral value as a function of launch angle. This function actually
gives the integral from z to the turning point ``z_turn``, since that
is what's needed for finding the peak angle.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral derivative at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
z_turn = np.log((ice.n0-beta)/ice.k)/ice.a
if deep:
if z_turn<ice.valid_range[1]:
return ((np.log((ice.n0-beta)/ice.k)/ice.a - z -
beta/(ice.a*(ice.n0-beta))) / np.sqrt(alpha))
else:
return -z / np.sqrt(alpha)
else:
if z_turn<ice.valid_range[1]:
term_1 = ((1+beta**2/alpha)/np.sqrt(alpha) *
(z + np.log(beta*ice.k/log_1) / ice.a))
term_2 = -(beta**2+ice.n0*n_z) / (ice.a*alpha*np.sqrt(gamma))
else:
term_1 = -(1+beta**2/alpha)/np.sqrt(alpha)*(-z + np.log(log_1) /
ice.a)
term_2 = -((beta*(np.sqrt(alpha)-np.sqrt(gamma)))**2 /
(ice.a*alpha*np.sqrt(gamma)*log_1))
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(ice.valid_range[1], beta, ice)
term_1 += (1+beta**2/alpha)/np.sqrt(alpha)*(np.log(log_1) /
ice.a)
term_2 += ((beta*(np.sqrt(alpha)-np.sqrt(gamma)))**2 /
(ice.a*alpha*np.sqrt(gamma)*log_1))
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
np.inf,
term_1+term_2)
# If the value of the integral just at z is needed (e.g. you want the
# correct values when reflecting off the surface of the ice),
# then use the terms below instead
# Be warned, however, that this gives the wrong value when turning over
# below the surface of the ice. The values get closer if only term_1
# is returned in cases where gamma==0 (turning over in ice),
# though the values are still slightly off
# if deep:
# return z / np.sqrt(alpha)
# term_1 = (1+beta**2/alpha)/np.sqrt(alpha)*(-z + np.log(log_1) / ice.a)
# term_2 = ((beta*(np.sqrt(alpha)-np.sqrt(gamma)))**2 /
# (ice.a*alpha*np.sqrt(gamma)*log_1))
# return np.where(gamma==0, term_1, term_1+term_2)
@classmethod
def _pathlen_integral(cls, z, beta, ice, deep=False):
"""
Indefinite z-integral for calculating path length.
Calculates the indefinite z-integral of sec(arcsin(beta/n(z))), which
between two z values gives the path length of the direct path between
the z values.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
if deep:
return ice.n0 * z / np.sqrt(alpha)
else:
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
z,
(ice.n0/np.sqrt(alpha) * (-z + np.log(log_1)/ice.a)
+ np.log(log_2) / ice.a))
@classmethod
def _tof_integral(cls, z, beta, ice, deep=False):
"""
Indefinite z-integral for calculating time of flight.
Calculates the indefinite z-integral of n(z)/c*sec(arcsin(beta/n(z))),
which between two z values gives the time of flight of the direct path
between the z values.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
if deep:
return (ice.n0*(n_z+ice.n0*(ice.a*z-1))
/ (ice.a*np.sqrt(alpha)*scipy.constants.c))
else:
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
((n_z-ice.n0)/ice.a + ice.n0*z) / scipy.constants.c,
(((np.sqrt(gamma) + ice.n0*np.log(log_2) +
ice.n0**2*np.log(log_1)/np.sqrt(alpha))/ice.a) -
z*ice.n0**2/np.sqrt(alpha)) / scipy.constants.c)
@classmethod
def _attenuation_integral_def(cls, zs, f, beta, ice, deep=False):
"""
Definite z-integral for calculating attenuation.
Calculates the definite z-integral of sec(arcsin(beta/n(z)))/A(z,f),
which between two z values gives the path length over attenuation length
of the direct path between the z values.
Parameters
----------
zs : array_like
(Negative-valued) depths (m) in the ice.
f : array_like
Frequencies (Hz) at which to calculate signal attenuation.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the definite integral along `zs`.
"""
fa = np.abs(f)
if deep or np.isclose(beta, 0, atol=cls.beta_tolerance):
int_var = zs
partial_integrand = 1 / np.cos(np.arcsin(beta/ice.index(zs)))
else:
# When approaching z_turn, the usual integrand approaches infinity.
# In that case make the change of variables below to fix it.
# The assumption now is that z_turn is always above z_uniform,
# which is valid for most realistic detector configurations.
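            # With u = cos(theta(z)) = sqrt(1 - (beta/n(z))**2) and
            # n(z) = n0 - k*exp(a*z), du = beta**2*n'(z)/(u*n**3) dz, so
            # sec(theta)/A dz = n**3/(beta**2*n'(z)*A) du with
            # n'(z) = -k*a*exp(a*z), matching the integrand below.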
int_var = np.sqrt(1 - (beta/ice.index(zs))**2)
partial_integrand = (ice.index(zs)**3 / beta**2 /
(-ice.k*ice.a*np.exp(ice.a*zs)))
alen = ice.attenuation_length(zs, fa)
integrand = (partial_integrand / alen.T).T
return np.trapz(integrand, x=int_var, axis=0)
@lazy_property
def path_length(self):
"""Length (m) of the ray path."""
return np.abs(self.z_integral(self._pathlen_integral))
@lazy_property
def tof(self):
"""Time of flight (s) along the ray path."""
return np.abs(self.z_integral(self._tof_integral))
def attenuation(self, f):
"""
Calculate the attenuation factor for signal frequencies.
Calculates the attenuation factor to be multiplied by the signal
amplitude at the given frequencies. Uses numerical integration since
frequency dependence causes there to be no analytic form.
Parameters
----------
f : array_like
Frequencies (Hz) at which to calculate signal attenuation.
Returns
-------
array_like
Attenuation factors for the signal at the frequencies `f`.
"""
return np.exp(-np.abs(self.z_integral(
self._attenuation_integral_def,
integrand_kwargs={'f': f},
numerical=True
)))
@lazy_property
def coordinates(self):
"""
x, y, and z-coordinates along the path (using dz step).
Coordinates are provided for plotting purposes only, and are not vetted
for use in calculations.
"""
def r_int(z0, z1s):
return np.array([self._z_int_uniform_correction(
z0, z, self.z_uniform, self.beta, self.ice,
self._distance_integral
)
for z in z1s])
if self.direct:
n_zs = int(np.abs(self.z1-self.z0)/self.dz)
zs = np.linspace(self.z0, self.z1, n_zs+1)
rs = r_int(self.z0, zs)
rs *= np.sign(np.cos(self.theta0))
else:
n_zs_1 = int(np.abs(self.z_turn-self.z0)/self.dz)
zs_1 = np.linspace(self.z0, self.z_turn, n_zs_1, endpoint=False)
rs_1 = r_int(self.z0, zs_1)
r_turn = r_int(self.z0, np.array([self.z_turn]))[0]
n_zs_2 = int(np.abs(self.z_turn-self.z1)/self.dz)
zs_2 = np.linspace(self.z_turn, self.z1, n_zs_2+1)
rs_2 = r_turn - r_int(self.z_turn, zs_2)
rs = np.concatenate((rs_1, rs_2))
zs = np.concatenate((zs_1, zs_2))
xs = self.from_point[0] + rs*np.cos(self.phi)
ys = self.from_point[1] + rs*np.sin(self.phi)
return xs, ys, zs
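# Illustrative cross-check, a sketch only: the endpoints are assumptions and
# the module-level default `ice` must derive from AntarcticIce for the
# specialized tracer to be valid.
def _demo_compare_tracers(from_point=(0, 0, -1000), to_point=(500, 0, -200)):
    basic = BasicRayTracer(from_point, to_point, dz=1)
    special = SpecializedRayTracer(from_point, to_point, dz=1)
    for numeric, analytic in zip(basic.solutions, special.solutions):
        # Analytic path lengths should agree with the numerical integration
        # to within the dz discretization error
        print(numeric.path_length, analytic.path_length)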
class BasicRayTracer(LazyMutableClass):
"""
Class for calculating the ray-trace solutions between points.
Calculations performed by integrating z-steps of size ``dz``. Most
properties are lazily evaluated to save on computation time. If any
attributes of the class instance are changed, the lazily-evaluated
properties will be cleared.
Parameters
----------
from_point : array_like
Vector starting point of the ray path.
to_point : array_like
Vector ending point of the ray path.
ice_model : optional
The ice model used for the ray tracer.
dz : float, optional
The z-step (m) to be used for integration of the ray path attributes.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
solution_class
Class to be used for each ray-trace solution path.
exists
expected_solutions
solutions
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
BasicRayTracePath : Class for representing a single ray-trace solution
between points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
"""
solution_class = BasicRayTracePath
def __init__(self, from_point, to_point, ice_model=ice, dz=1):
self.from_point = np.array(from_point)
self.to_point = np.array(to_point)
self.ice = ice_model
self.dz = dz
super().__init__()
@property
def z_turn_proximity(self):
"""
Parameter for how closely path approaches z_turn.
Necessary to avoid diverging integrals which occur at z_turn.
"""
# Best value of dz/10 determined empirically by checking errors
return self.dz/10
# Calculations performed as if launching from low to high
@property
def z0(self):
"""
Depth (m) of the lower endpoint.
Ray tracing performed as if launching from lower point to higher point,
since the only difference in the paths produced is a time reversal.
This is the depth of the assumed launching point.
"""
return min([self.from_point[2], self.to_point[2]])
@property
def z1(self):
"""
Depth (m) of the higher endpoint.
Ray tracing performed as if launching from lower point to higher point,
since the only difference in the paths produced is a time reversal.
This is the depth of the assumed receiving point.
"""
return max([self.from_point[2], self.to_point[2]])
@lazy_property
def n0(self):
"""Index of refraction of the ice at the lower endpoint."""
return self.ice.index(self.z0)
@lazy_property
def rho(self):
"""Radial distance between the endpoints."""
u = self.to_point - self.from_point
return np.sqrt(u[0]**2 + u[1]**2)
@lazy_property
def max_angle(self):
"""Maximum possible launch angle that could connect the endpoints."""
return np.arcsin(self.ice.index(self.z1)/self.n0)
@lazy_property
def peak_angle(self):
"""
Angle at which the indirect solutions curve (in r vs angle) peaks.
This angle separates the angle intervals to be used for indirect
solution root-finding.
"""
for tolerance in np.logspace(-12, -4, num=3):
for angle_step in np.logspace(-3, 0, num=4):
r_func = (lambda angle, brent_arg:
self._indirect_r_prime(angle, brent_arg,
d_angle=angle_step))
try:
peak_angle = self.angle_search(0, r_func,
angle_step, self.max_angle,
tolerance=tolerance)
except (RuntimeError, ValueError):
# Failed to converge
continue
else:
if peak_angle>np.pi/2:
peak_angle = np.pi - peak_angle
return peak_angle
# If all else fails, just use the max_angle
return self.max_angle
@lazy_property
def direct_r_max(self):
"""Maximum r value of direct ray solutions."""
z_turn = self.ice.depth_with_index(self.n0 * np.sin(self.max_angle))
return self._direct_r(self.max_angle,
force_z1=z_turn-self.z_turn_proximity)
@lazy_property
def indirect_r_max(self):
"""Maximum r value of indirect ray solutions."""
return self._indirect_r(self.peak_angle)
@lazy_property
def exists(self):
"""Boolean of whether any paths exist between the endpoints."""
return True in self.expected_solutions
@lazy_property
def expected_solutions(self):
"""
List of which types of solutions are expected to exist.
The first element of the list represents the direct path, the second
element represents the indirect path with a launch angle greater than
the peak angle, and the third element represents the indirect path with
a launch angle less than the peak angle.
"""
if not(self.ice.contains(self.from_point) and
self.ice.contains(self.to_point)):
return [False, False, False]
if self.rho<self.direct_r_max:
return [True, False, True]
elif self.rho<self.indirect_r_max:
return [False, True, True]
else:
return [False, False, False]
@lazy_property
def solutions(self):
"""
List of existing rays between the two points.
This list should have zero elements if there are no possible paths
between the endpoints or two elements otherwise, representing the
more direct and the less direct paths, respectively.
"""
angles = [
self.direct_angle,
self.indirect_angle_1,
self.indirect_angle_2
]
return [self.solution_class(self, angle, direct=(i==0))
for i, angle, exists in zip(range(3), angles,
self.expected_solutions)
if exists and angle is not None]
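    # For example (sketch): for endpoints inside the ice with
    # rho < direct_r_max, expected_solutions is [True, False, True], so
    # `solutions` holds one direct path and one indirect path whose launch
    # angle lies below the peak angle.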
def _direct_r(self, angle, brent_arg=0, force_z1=None):
"""
Calculate the r distance of the direct ray for a given launch angle.
Parameters
----------
angle : float
Launch angle (radians) of a direct ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
force_z1 : float or None, optional
Value to use for the ``z1`` receiving depth. If ``None``, the
``z1`` property of the class will be used. Useful for changing the
integration limits to integrate to the turning point instead.
Returns
-------
float
Value of the radial distance integral minus the `brent_arg`.
"""
if force_z1 is not None:
z1 = force_z1
else:
z1 = self.z1
n_zs = int(np.abs((z1-self.z0)/self.dz))
zs, dz = np.linspace(self.z0, z1, n_zs+1, retstep=True)
integrand = np.tan(np.arcsin(np.sin(angle) *
self.n0/self.ice.index(zs)))
return np.trapz(integrand, dx=dz) - brent_arg
def _indirect_r(self, angle, brent_arg=0):
"""
Calculate the r distance of the indirect ray for a given launch angle.
Parameters
----------
angle : float
Launch angle (radians) of an indirect ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
Returns
-------
float
Value of the radial distance integral minus the `brent_arg`.
"""
z_turn = self.ice.depth_with_index(self.n0 * np.sin(angle))
n_zs_1 = int(np.abs((z_turn-self.z_turn_proximity-self.z0)/self.dz))
zs_1, dz_1 = np.linspace(self.z0, z_turn-self.z_turn_proximity,
n_zs_1+1, retstep=True)
integrand_1 = np.tan(np.arcsin(np.sin(angle) *
self.n0/self.ice.index(zs_1)))
n_zs_2 = int(np.abs((z_turn-self.z_turn_proximity-self.z1)/self.dz))
zs_2, dz_2 = np.linspace(z_turn-self.z_turn_proximity, self.z1,
n_zs_2+1, retstep=True)
integrand_2 = np.tan(np.arcsin(np.sin(angle) *
self.n0/self.ice.index(zs_2)))
return (np.trapz(integrand_1, dx=dz_1) +
np.trapz(integrand_2, dx=-dz_2)) - brent_arg
def _indirect_r_prime(self, angle, brent_arg=0, d_angle=0.001):
"""
Calculate the r distance derivative of the indirect ray.
Parameters
----------
angle : float
Launch angle (radians) of an indirect ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
d_angle : float, optional
Difference in angle to use for calculation of the derivative.
Returns
-------
float
Value of the numerical derivative of the radial distance integral,
minus the `brent_arg`.
"""
return ((self._indirect_r(angle) - self._indirect_r(angle-d_angle))
/ d_angle) - brent_arg
def _get_launch_angle(self, r_function, min_angle=0, max_angle=np.pi/2):
"""
Calculates the launch angle for a ray with the given r_function.
Finds the root of the given r function as a function of angle to
determine the corresponding launch angle.
Parameters
----------
r_function : function
Function to calculate the radial distance for a given launch angle.
min_angle : float, optional
Minimum allowed angle for the `r_function`'s root.
max_angle : float, optional
Maximum allowed angle for the `r_function`'s root.
Returns
-------
float or None
True launch angle (radians) of the path corresponding to the
`r_function`. True launch angle means launches from ``from_point``
rather than from ``z0``. Value is ``None`` if the root finder was
unable to converge.
"""
try:
launch_angle = self.angle_search(self.rho, r_function,
min_angle, max_angle)
except RuntimeError:
# Failed to converge
launch_angle = None
except ValueError:
logger.error("Error calculating launch angle between %s and %s",
self.from_point, self.to_point)
raise
# Convert to true launch angle from self.from_point
# rather than from lower point (self.z0)
return np.arcsin(np.sin(launch_angle) *
self.n0 / self.ice.index(self.from_point[2]))
@lazy_property
def direct_angle(self):
"""Launch angle (radians) of the direct ray."""
if self.expected_solutions[0]:
launch_angle = self._get_launch_angle(self._direct_r,
max_angle=self.max_angle)
if self.from_point[2] > self.to_point[2]:
launch_angle = np.pi - launch_angle
return launch_angle
else:
return None
@lazy_property
def indirect_angle_1(self):
"""
Launch angle (radians) of the first indirect ray.
The first indirect ray is the indirect ray where the launch angle is
greater than the peak angle.
"""
if self.expected_solutions[1]:
return self._get_launch_angle(self._indirect_r,
min_angle=self.peak_angle,
max_angle=self.max_angle)
else:
return None
@lazy_property
def indirect_angle_2(self):
"""
Launch angle (radians) of the second indirect ray.
The second indirect ray is the indirect ray where the launch angle is
less than the peak angle.
"""
if self.expected_solutions[2]:
if self.expected_solutions[1]:
max_angle = self.peak_angle
else:
max_angle = self.max_angle
return self._get_launch_angle(self._indirect_r,
max_angle=max_angle)
else:
return None
@staticmethod
def angle_search(true_r, r_function, min_angle, max_angle,
tolerance=1e-12, max_iterations=100):
"""
Calculates the angle where `r_function` (angle) == `true_r`.
Runs the brentq root-finding algorithm on `r_function` with an offset
of `true_r` to find the angle at which they are equal.
Parameters
----------
true_r : float
Desired value for the radial distance.
r_function : function
Function to calculate the radial distance for a given launch angle.
min_angle : float
Minimum allowed angle for the `r_function`.
max_angle : float
Maximum allowed angle for the `r_function`.
tolerance : float, optional
Tolerance in the root value for convergence.
max_iterations : int, optional
Maximum number of iterations the root finder will attempt.
Returns
-------
float
The launch angle which will satisfy the condition
`r_function` (angle) == `true_r`.
Raises
------
RuntimeError
If the root finder doesn't converge.
"""
return scipy.optimize.brentq(r_function, min_angle, max_angle,
                                     args=(true_r,), xtol=tolerance,
maxiter=max_iterations)
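# Minimal sketch of the root-finding pattern above: brentq locates the
# launch angle at which an r-function crosses `true_r`. The toy r-function
# here is an assumption chosen only to bracket a root.
def _demo_angle_search():
    r_func = lambda angle, true_r: 100 * np.sin(2 * angle) - true_r
    # Signs differ at the bracket ends (0 -> -50, pi/4 -> +50), so brentq
    # converges to the angle where r(angle) == 50 (pi/12 here)
    return scipy.optimize.brentq(r_func, 0, np.pi / 4, args=(50,))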
class SpecializedRayTracer(BasicRayTracer):
"""
Class for calculating the ray-trace solutions between points.
Calculations in this class require the index of refraction of the ice to be
of the form n(z)=n0-k*exp(a*z). However this restriction allows for most of
the integrations to be performed analytically. Most properties are lazily
evaluated to save on computation time. If any attributes of the class
instance are changed, the lazily-evaluated properties will be cleared.
Parameters
----------
from_point : array_like
Vector starting point of the ray path.
to_point : array_like
Vector ending point of the ray path.
ice_model : optional
The ice model used for the ray tracer.
dz : float, optional
The z-step (m) to be used for integration of the ray path attributes.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
solution_class
Class to be used for each ray-trace solution path.
exists
expected_solutions
solutions
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
SpecializedRayTracePath : Class for representing a single ray-trace
solution between points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
The requirement that the ice model go as n(z)=n0-k*exp(a*z) is implemented
    by requiring the ice model to inherit from `AntarcticIce`. This check is
    not fool-proof, but the ray tracing will likely fail in an obvious way if
    the index follows a very different functional form.
"""
solution_class = SpecializedRayTracePath
@lazy_property
def valid_ice_model(self):
"""Whether the ice model being used supports this specialization."""
return ((isinstance(self.ice, type) and
issubclass(self.ice, AntarcticIce))
or isinstance(self.ice, AntarcticIce))
@lazy_property
def z_uniform(self):
"""
Depth (m) beyond which the ice should be treated as uniform.
Calculated based on the ``uniformity_factor`` of the
``solution_class``. Necessary due to numerical rounding issues at
indices close to the index limit.
"""
return self.ice.depth_with_index(self.ice.n0 *
self.solution_class.uniformity_factor)
@lazy_property
def direct_r_max(self):
"""Maximum r value of direct ray solutions."""
return self._direct_r(self.max_angle)
def _r_distance(self, theta, z0, z1):
"""
Calculate the r distance between depths for a given launch angle.
Parameters
----------
theta : float
Launch angle (radians) of a ray path.
z0 : float
(Negative-valued) first depth (m) in the ice.
z1 : float
(Negative-valued) second depth (m) in the ice.
Returns
-------
float
Value of the radial distance integral between `z0` and `z1`.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must inherit methods from "+
"pyrex.AntarcticIce")
beta = np.sin(theta) * self.n0
return self.solution_class._z_int_uniform_correction(
z0, z1, self.z_uniform, beta, self.ice,
self.solution_class._distance_integral
)
def _r_distance_derivative(self, theta, z0, z1):
"""
Calculate the derivative of the r distance between depths for an angle.
Parameters
----------
theta : float
Launch angle (radians) of a ray path.
z0 : float
(Negative-valued) first depth (m) in the ice.
z1 : float
(Negative-valued) second depth (m) in the ice.
Returns
-------
float
Value of the derivative of the radial distance integral between
`z0` and `z1`.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must inherit methods from "+
"pyrex.AntarcticIce")
beta = np.sin(theta) * self.n0
beta_prime = np.cos(theta) * self.n0
return beta_prime * self.solution_class._z_int_uniform_correction(
z0, z1, self.z_uniform, beta, self.ice,
self.solution_class._distance_integral_derivative,
derivative_special_case=True
)
def _direct_r(self, angle, brent_arg=0, force_z1=None):
"""
Calculate the r distance of the direct ray for a given launch angle.
Parameters
----------
angle : float
Launch angle (radians) of a direct ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
force_z1 : float or None, optional
Value to use for the ``z1`` receiving depth. If ``None``, the
``z1`` property of the class will be used. Useful for changing the
integration limits to integrate to the turning point instead.
Returns
-------
float
Value of the radial distance integral minus the `brent_arg`.
"""
if force_z1 is not None:
z1 = force_z1
else:
z1 = self.z1
return self._r_distance(angle, self.z0, z1) - brent_arg
def _indirect_r(self, angle, brent_arg=0, link_range=1e-6):
"""
Calculate the r distance of the indirect ray for a given launch angle.
Parameters
----------
angle : float
Launch angle (radians) of an indirect ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
link_range : float, optional
Angular range from `max_angle` over which the indirect ray distance
is adjusted so that it linearly approaches the maximum direct ray
distance at `max_angle`.
Returns
-------
float
Value of the radial distance integral minus the `brent_arg`.
"""
z_turn = self.ice.depth_with_index(self.n0 * np.sin(angle))
link_angle = self.max_angle - link_range
if angle>link_angle:
link_dist = (self._r_distance(link_angle, self.z0, z_turn) +
self._r_distance(link_angle, self.z1, z_turn))
slope = (link_dist - self.direct_r_max) / link_range
dist = self.direct_r_max + slope * (self.max_angle - angle)
return dist - brent_arg
else:
dist = (self._r_distance(angle, self.z0, z_turn) +
self._r_distance(angle, self.z1, z_turn))
return dist - brent_arg
def _indirect_r_prime(self, angle, brent_arg=0):
"""
Calculate the r distance derivative of the indirect ray.
Parameters
----------
angle : float
Launch angle (radians) of an indirect ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
Returns
-------
float
Value of the derivative of the radial distance integral minus the
`brent_arg`.
"""
return self._r_distance_derivative(angle, self.z0, self.z1) - brent_arg
@lazy_property
def peak_angle(self):
"""
Angle at which the indirect solutions curve (in r vs angle) peaks.
This angle separates the angle intervals to be used for indirect
solution root-finding.
"""
try:
peak_angle = self.angle_search(0, self._indirect_r_prime,
0, self.max_angle)
except ValueError:
# _indirect_r_prime(0) and _indirect_r_prime(max_angle) have the
# same sign -> no true peak angle
return self.max_angle
except RuntimeError:
# Failed to converge
return None
else:
if peak_angle>np.pi/2:
peak_angle = np.pi - peak_angle
return peak_angle
class UniformRayTracePath(LazyMutableClass):
"""
Class for representing a single ray solution in uniform ice.
Stores parameters of the ray path through uniform ice. Most properties are
lazily evaluated to save on computation time. If any attributes of the
class instance are changed, the lazily-evaluated properties will be
cleared.
Parameters
----------
parent_tracer : UniformRayTracer
Ray tracer for which this path is a solution.
launch_angle : float
Launch angle (radians) of the ray path.
reflections : int
Number of reflections made by the ray path at boundaries of the ice.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
theta0 : float
The launch angle of the ray path at `from_point`.
ice
The ice model used for the ray tracer.
direct : boolean
Whether the ray path is direct (does not reflect).
emitted_direction
received_direction
path_length
tof
coordinates
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
UniformRayTracer : Class for calculating ray solutions in uniform ice.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
"""
def __init__(self, parent_tracer, launch_angle, reflections):
self.from_point = parent_tracer.from_point
self.to_point = parent_tracer.to_point
self.theta0 = launch_angle
self.ice = parent_tracer.ice
self.direct = reflections==0
self._reflections = reflections
super().__init__()
@lazy_property
def _points(self):
"""Relevant points along the path."""
if self.direct:
return np.asarray([self.from_point, self.to_point])
else:
points = np.zeros((self._reflections+2, 3))
points[0] = self.from_point
dzs = []
if self.theta0>0:
initial_direction = 1
elif self.theta0<0:
initial_direction = -1
else:
raise ValueError("Invalid initial direction")
if initial_direction==1:
dzs.append(self.ice.valid_range[1]-self.z0)
else:
dzs.append(self.z0-self.ice.valid_range[0])
size = self.ice.valid_range[1] - self.ice.valid_range[0]
dzs.extend([size]*(self._reflections-1))
final_direction = initial_direction * (-1)**self._reflections
if final_direction==1:
dzs.append(self.z1-self.ice.valid_range[0])
else:
dzs.append(self.ice.valid_range[1]-self.z1)
drs = self.rho * np.asarray(dzs)/np.sum(dzs)
rs = np.cumsum(drs)
points[1:, 0] = rs * np.cos(self.phi)
points[1:, 1] = rs * np.sin(self.phi)
for i in range(self._reflections):
dirn = ((initial_direction * (-1)**i)+1)//2
points[i+1, 2] = self.ice.valid_range[dirn]
points[-1] = self.to_point
return points
@property
def valid_ice_model(self):
"""Whether the ice model being used is supported."""
return isinstance(self.ice, UniformIce)
@property
def z0(self):
"""Depth (m) of the launching point."""
return self.from_point[2]
@property
def z1(self):
"""Depth (m) of the receiving point."""
return self.to_point[2]
@lazy_property
def n0(self):
"""Index of refraction of the ice at the launching point."""
return self.ice.index(self.z0)
@lazy_property
def rho(self):
"""Radial distance (m) between the endpoints."""
u = self.to_point - self.from_point
return np.sqrt(u[0]**2 + u[1]**2)
@lazy_property
def phi(self):
"""Azimuthal angle (radians) between the endpoints."""
u = self.to_point - self.from_point
return np.arctan2(u[1], u[0])
@lazy_property
def emitted_direction(self):
"""Direction in which ray is emitted."""
if self.direct and np.array_equal(self.from_point, self.to_point):
return np.array([0, 0, 1])
return normalize(self._points[1] - self._points[0])
@lazy_property
def received_direction(self):
"""Direction ray is travelling when it is received."""
if self.direct and np.array_equal(self.from_point, self.to_point):
return np.array([0, 0, 1])
return normalize(self._points[-1] - self._points[-2])
@lazy_property
def path_length(self):
"""Length (m) of the ray path."""
if not self.valid_ice_model:
raise TypeError("Ice model must be uniform ice")
return np.sum([np.sqrt(np.sum((p2-p1)**2))
for p1, p2 in zip(self._points[:-1], self._points[1:])])
@lazy_property
def tof(self):
"""Time of flight (s) along the ray path."""
return self.n0 * self.path_length / scipy.constants.c
@lazy_property
def fresnel(self):
"""
Fresnel factors for reflections off the ice boundaries.
The fresnel reflectances are calculated as the square root (ratio of
amplitudes, not powers). Stores the s and p polarized factors,
respectively.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must be uniform ice")
r_s = 1
r_p = 1
n_1 = self.n0
if len(self._points)<3:
return r_s, r_p
for p1, p2 in zip(self._points[:-2], self._points[1:-1]):
if p2[2]==self.ice.valid_range[0]:
n_2 = self.ice.index_below
elif p2[2]==self.ice.valid_range[1]:
n_2 = self.ice.index_above
else:
raise ValueError("Intermediate points don't reflect off the "+
"ice boundaries")
dr = np.sqrt(np.sum((p2[:2]-p1[:2])**2))
dz = np.abs(p2[2]-p1[2])
theta_1 = np.arctan(dr/dz)
cos_1 = np.cos(theta_1)
sin_2 = n_1/n_2*np.sin(theta_1)
if sin_2<=1:
cos_2 = np.sqrt(1 - (sin_2)**2)
else:
cos_2 = np.sqrt((sin_2)**2 - 1)*1j
# TODO: Confirm sign convention here
r_s *= (n_1*cos_1 - n_2*cos_2) / (n_1*cos_1 + n_2*cos_2)
r_p *= (n_2*cos_1 - n_1*cos_2) / (n_2*cos_1 + n_1*cos_2)
return r_s, r_p
def attenuation(self, f, dz=1):
"""
Calculate the attenuation factor for signal frequencies.
Calculates the attenuation factor to be multiplied by the signal
amplitude at the given frequencies.
Parameters
----------
f : array_like
Frequencies (Hz) at which to calculate signal attenuation.
dz : float, optional
Step size in z to divide the ice. Actual step size will not be
exactly this value, but is guaranteed to be less than the given
value.
Returns
-------
array_like
Attenuation factors for the signal at the frequencies `f`.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must be uniform ice")
fa = np.abs(f)
attens = np.ones(fa.shape)
for p1, p2 in zip(self._points[:-1], self._points[1:]):
if p1[2]==p2[2]:
dp = np.sqrt(np.sum((p2-p1)**2))
zs = np.array([p1[2]])
else:
dpdz = (p2-p1)/(p2[2]-p1[2])
n_steps = int(np.abs(p2[2]-p1[2]) / dz) + 2
zs, dz_true = np.linspace(p1[2], p2[2], n_steps,
endpoint=False, retstep=True)
dp = np.sqrt(np.sum((dpdz*dz_true)**2))
alens = self.ice.attenuation_length(zs, fa)
attens *= np.prod(np.exp(-dp/alens), axis=0)
return attens
def propagate(self, signal=None, polarization=None):
"""
Propagate the signal with optional polarization along the ray path.
Applies the frequency-dependent signal attenuation along the ray path
and shifts the times according to the ray time of flight. Additionally
provides the s and p polarization directions.
Parameters
----------
signal : Signal, optional
``Signal`` object to propagate.
polarization : array_like, optional
Vector representing the linear polarization of the `signal`.
Returns
-------
tuple of Signal
Tuple of ``Signal`` objects representing the s and p polarizations
of the original `signal` attenuated along the ray path. Only
returned if `signal` was not ``None``.
tuple of ndarray
Tuple of polarization vectors representing the s and p polarization
directions of the `signal` at the end of the ray path. Only
returned if `polarization` was not ``None``.
See Also
--------
pyrex.Signal : Base class for time-domain signals.
"""
if polarization is None:
if signal is None:
return
else:
new_signal = signal.copy()
new_signal.shift(self.tof)
new_signal.filter_frequencies(self.attenuation)
return new_signal
else:
# Unit vectors perpendicular and parallel to plane of incidence
# at the launching point
u_s0 = normalize(np.cross(self.emitted_direction, [0, 0, 1]))
u_p0 = normalize(np.cross(u_s0, self.emitted_direction))
# Unit vector parallel to plane of incidence at the receiving point
# (perpendicular vector stays the same)
u_p1 = normalize(np.cross(u_s0, self.received_direction))
if signal is None:
return (u_s0, u_p1)
else:
# Amplitudes of s and p components
pol_s = np.dot(polarization, u_s0)
pol_p = np.dot(polarization, u_p0)
# Fresnel reflectances of s and p components
r_s, r_p = self.fresnel
# Apply fresnel s and p coefficients in addition to attenuation
attenuation_s = lambda freqs: self.attenuation(freqs) * r_s
attenuation_p = lambda freqs: self.attenuation(freqs) * r_p
signal_s = signal * pol_s
signal_p = signal * pol_p
signal_s.shift(self.tof)
signal_p.shift(self.tof)
signal_s.filter_frequencies(attenuation_s, force_real=True)
signal_p.filter_frequencies(attenuation_p, force_real=True)
return (signal_s, signal_p), (u_s0, u_p1)
@lazy_property
def coordinates(self):
"""
x, y, and z-coordinates along the path.
Coordinates are only calculated at ice layer boundaries, as the path
is assumed to be straight within an ice layer.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must be uniform ice")
xs = np.array([p[0] for p in self._points])
ys = np.array([p[1] for p in self._points])
zs = np.array([p[2] for p in self._points])
return xs, ys, zs
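# Illustrative sketch: the `UniformIce(index=1.78)` constructor call and the
# assumption that `solutions` lists the direct path first are hypothetical.
def _demo_uniform_path():
    ice_model = UniformIce(index=1.78)
    tracer = UniformRayTracer((0, 0, -100), (300, 0, -200), ice_model)
    path = tracer.solutions[0]
    # The direct path in uniform ice is a straight line, so its length is
    # the Euclidean distance between the endpoints
    assert np.isclose(path.path_length, np.sqrt(300**2 + 100**2))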
class UniformRayTracer(LazyMutableClass):
"""
Class for calculating ray solutions in uniform ice.
Calculations performed using straight-line paths. Most properties are
lazily evaluated to save on computation time. If any attributes of the
class instance are changed, the lazily-evaluated properties will be
cleared.
Parameters
----------
from_point : array_like
Vector starting point of the ray path.
to_point : array_like
Vector ending point of the ray path.
ice_model
The ice model used for the ray tracer.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
ice
The ice model used for the ray tracer.
solution_class
Class to be used for each ray-trace solution path.
exists
expected_solutions
solutions
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
UniformRayTracePath : Class for representing a single ray solution in
uniform ice.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
"""
solution_class = UniformRayTracePath
max_reflections = 0
def __init__(self, from_point, to_point, ice_model):
self.from_point = np.array(from_point)
self.to_point = np.array(to_point)
self.ice = ice_model
super().__init__()
@property
def valid_ice_model(self):
"""Whether the ice model being used is supported."""
return isinstance(self.ice, UniformIce)
@property
def z0(self):
"""Depth (m) of the launching point."""
return self.from_point[2]
@property
def z1(self):
"""Depth (m) of the receiving point."""
return self.to_point[2]
@lazy_property
def n0(self):
"""Index of refraction of the ice at the starting endpoint."""
return self.ice.index(self.z0)
@lazy_property
def rho(self):
"""Radial distance (m) between the endpoints."""
u = self.to_point - self.from_point
return np.sqrt(u[0]**2 + u[1]**2)
@lazy_property
def phi(self):
"""Azimuthal angle (radians) between the endpoints."""
u = self.to_point - self.from_point
return np.arctan2(u[1], u[0])
@lazy_property
def exists(self):
"""
Boolean of whether any paths exist between the endpoints.
Paths are deemed invalid if at least one of the endpoints is outside of
the allowed ice range.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must be uniform ice")
return (self.ice.valid_range[0]<=self.z0<=self.ice.valid_range[1] and
self.ice.valid_range[0]<=self.z1<=self.ice.valid_range[1])
def _reflected_path(self, reflections, initial_direction):
"""
Generate reflected path for given parameters.
Path will have the given number of reflections and the given initial
direction (+1 for upward, -1 for downward).
"""
if not self.valid_ice_model:
raise TypeError("Ice model must be uniform ice")
if reflections<1:
raise ValueError("Number of reflections must be one or larger")
if (self.ice._index_above is None and
(reflections>1 or initial_direction==1)):
raise TypeError("Reflections not allowed off the upper surface")
if (self.ice._index_below is None and
(reflections>1 or initial_direction==-1)):
raise TypeError("Reflections not allowed off the lower surface")
        points = np.zeros((reflections+2, 3))
"""Contains most of the methods that compose the ORIGIN software."""
import itertools
import logging
import warnings
from datetime import datetime
from functools import wraps
from time import time
warnings.filterwarnings("ignore", category=RuntimeWarning)
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Gaussian1D
from astropy.nddata import overlap_slices
from astropy.stats import (
gaussian_fwhm_to_sigma,
gaussian_sigma_to_fwhm,
sigma_clipped_stats,
)
from astropy.table import Column, Table, join
from astropy.stats import sigma_clip
from astropy.utils.exceptions import AstropyUserWarning
from joblib import Parallel, delayed
from mpdaf.obj import Image
from mpdaf.tools import progressbar
from numpy import fft
from numpy.linalg import multi_dot
from scipy import fftpack, stats
from scipy.interpolate import interp1d
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import label as ndi_label
from scipy.ndimage import maximum_filter
from scipy.signal import fftconvolve
from scipy.sparse.linalg import svds
from scipy.spatial import ConvexHull, cKDTree
from .source_masks import gen_source_mask
__all__ = (
'add_tglr_stat',
'compute_deblended_segmap',
'Compute_GreedyPCA',
'compute_local_max',
'compute_segmap_gauss',
'compute_thresh_gaussfit',
'Compute_threshold_purity',
'compute_true_purity',
'Correlation_GLR_test',
'create_masks',
'estimation_line',
'merge_similar_lines',
'purity_estimation',
'spatial_segmentation',
'spatiospectral_merging',
'unique_sources',
)
def timeit(f):
"""Decorator which prints the execution time of a function."""
@wraps(f)
def timed(*args, **kw):
logger = logging.getLogger(__name__)
t0 = time()
result = f(*args, **kw)
logger.debug('%s executed in %0.1fs', f.__name__, time() - t0)
return result
return timed
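# Illustrative usage sketch (not part of the original module); the function
# name `_demo_timed_sum` is hypothetical. The timing goes to the debug log.
@timeit
def _demo_timed_sum(n=1_000_000):
    return sum(range(n))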
def orthogonal_projection(a, b):
"""Compute the orthogonal projection: a.(a^T.a)-1.a^T.b
NOTE: does not include the (a^T.a)-1 term as it is often not needed (when
a is already normalized).
"""
# Using multi_dot which is faster than np.dot(np.dot(a, a.T), b)
# Another option would be to use einsum, less readable but also very
# fast with Numpy 1.14+ and optimize=True. This seems to be as fast as
# multi_dot.
# return np.einsum('i,j,jk->ik', a, a, b, optimize=True)
if a.ndim == 1:
a = a[:, None]
return multi_dot([a, a.T, b])
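# Illustrative self-check (not part of the original module); the helper name
# `_demo_orthogonal_projection` is hypothetical.
def _demo_orthogonal_projection():
    rng = np.random.default_rng(0)
    a = rng.normal(size=5)
    a /= np.sqrt(np.sum(a ** 2))  # normalized, so the (a^T.a)^-1 term is 1
    b = rng.normal(size=(5, 3))
    p = orthogonal_projection(a, b)
    # projecting a second time changes nothing: P.P.b == P.b
    assert np.allclose(orthogonal_projection(a, p), p)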
@timeit
def spatial_segmentation(Nx, Ny, NbSubcube, start=None):
"""Compute indices to split spatially in NbSubcube x NbSubcube regions.
    Each zone is computed from left to right and top to bottom. The first
    pixel of the first zone has coordinates (row, col) = (Nx, 1).
Parameters
----------
Nx : int
Number of columns
Ny : int
Number of rows
NbSubcube : int
Number of subcubes for the spatial segmentation
    start : tuple
        If not None, the tuple is the (y, x) starting point.
Returns
-------
    inty, intx : arrays of int
        Limits in pixels of the rows/columns for each zone.
"""
# Segmentation of the rows vector in Nbsubcube parts from right to left
    inty = np.linspace(Ny, 0, NbSubcube + 1, dtype=int)
# Segmentation of the columns vector in Nbsubcube parts from left to right
    intx = np.linspace(0, Nx, NbSubcube + 1, dtype=int)
if start is not None:
inty += start[0]
intx += start[1]
return inty, intx
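# Illustrative sketch (not from the original source): split a 100x60 image
# into a 2x2 grid; `_demo_spatial_segmentation` is a hypothetical name.
def _demo_spatial_segmentation():
    inty, intx = spatial_segmentation(Nx=100, Ny=60, NbSubcube=2)
    assert list(intx) == [0, 50, 100]   # columns, left to right
    assert list(inty) == [60, 30, 0]    # rows, top to bottom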
def DCTMAT(nl, order):
"""Return the DCT transformation matrix of size nl-by-(order+1).
    Equivalent function to Matlab/Octave's dctmtx.
https://octave.sourceforge.io/signal/function/dctmtx.html
Parameters
----------
order : int
Order of the DCT (spectral length).
Returns
-------
array: DCT Matrix
"""
yy, xx = np.mgrid[:nl, : order + 1]
D0 = np.sqrt(2 / nl) * np.cos((yy + 0.5) * (np.pi / nl) * xx)
D0[:, 0] *= 1 / np.sqrt(2)
return D0
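# Illustrative self-check (not from the original source): the DCT basis is
# orthonormal; `_demo_dctmat` is a hypothetical name.
def _demo_dctmat():
    D0 = DCTMAT(8, 3)
    assert D0.shape == (8, 4)
    # columns are orthonormal, so D0^T.D0 is the identity
    assert np.allclose(D0.T @ D0, np.eye(4))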
@timeit
def dct_residual(w_raw, order, var, approx, mask):
"""Function to compute the residual of the DCT on raw data.
Parameters
----------
w_raw : array
Data array.
order : int
The number of atom to keep for the DCT decomposition.
var : array
Variance array.
    approx : bool
        If True, an approximate computation is used, not taking the variance
        into account.
    mask : array
        Mask of invalid values; spaxels with at least one masked value fall
        back to the approximate computation.
Returns
-------
Faint, cont : array
Residual and continuum estimated from the DCT decomposition.
"""
nl = w_raw.shape[0]
D0 = DCTMAT(nl, order)
shape = w_raw.shape[1:]
nspec = np.prod(shape)
if approx:
# Compute the DCT transformation, without using the variance.
#
# Given the transformation matrix D0, we compute for each spectrum S:
#
# C = D0.D0^t.S
#
# Old version using tensordot:
# A = np.dot(D0, D0.T)
# cont = np.tensordot(A, w_raw, axes=(0, 0))
# Looping on spectra and using multidot is ~6x faster:
# D0 is typically 3681x11 elements, so it is much more efficient
# to compute D0^t.S first (note the array is reshaped below)
cont = [
multi_dot([D0, D0.T, w_raw[:, y, x]])
for y, x in progressbar(np.ndindex(shape), total=nspec)
]
# For reference, this is identical to the following scipy version,
# though scipy is 2x slower than tensordot (probably because it
# computes all the coefficients)
# from scipy.fftpack import dct
# w = (np.arange(nl) < (order + 1)).astype(int)
# cont = dct(dct(w_raw, type=2, norm='ortho', axis=0) * w[:,None,None],
# type=3, norm='ortho', axis=0, overwrite_x=False)
else:
# Compute the DCT transformation, using the variance.
#
# As the noise differs on each spectral component, we need to take into
# account the (diagonal) covariance matrix Σ for each spectrum S:
#
        # C = D0.(D0^t.Σ^-1.D0)^-1.D0^t.Σ^-1.S
#
w_raw_var = w_raw / var
D0T = D0.T
# Old version (slow):
# def continuum(D0, D0T, var, w_raw_var):
# A = np.linalg.inv(np.dot(D0T / var, D0))
# return np.dot(np.dot(np.dot(D0, A), D0T), w_raw_var)
#
# cont = Parallel()(
# delayed(continuum)(D0, D0T, var[:, i, j], w_raw_var[:, i, j])
# for i in range(w_raw.shape[1]) for j in range(w_raw.shape[2]))
# cont = np.asarray(cont).T.reshape(w_raw.shape)
# map of valid spaxels, i.e. spaxels with at least one valid value
valid = ~np.any(mask, axis=0)
from numpy.linalg import inv
cont = []
for y, x in progressbar(np.ndindex(shape), total=nspec):
if valid[y, x]:
res = multi_dot(
[D0, inv(np.dot(D0T / var[:, y, x], D0)), D0T, w_raw_var[:, y, x]]
)
else:
res = multi_dot([D0, D0.T, w_raw[:, y, x]])
cont.append(res)
return np.stack(cont).T.reshape(w_raw.shape)
def compute_segmap_gauss(data, pfa, fwhm_fsf=0, bins='fd'):
"""Compute segmentation map from an image, using gaussian statistics.
Parameters
----------
data : array
Input values, typically from a O2 test.
pfa : float
Desired false alarm.
    fwhm_fsf : int
        Width (in integer pixels) of the filter, to convolve with a PSF disc.
bins : str
        Method for computing bins (see numpy.histogram_bin_edges).
Returns
-------
float, array
threshold, and labeled image.
"""
# test threshold : uses a Gaussian approximation of the test statistic
# under H0
histO2, frecO2, gamma, mea, std = compute_thresh_gaussfit(data, pfa, bins=bins)
    # threshold - erosion and dilation to remove point-like sources
mask = data > gamma
mask = binary_erosion(mask, border_value=1, iterations=1)
mask = binary_dilation(mask, iterations=1)
# convolve with PSF
if fwhm_fsf > 0:
fwhm_pix = int(fwhm_fsf) // 2
size = fwhm_pix * 2 + 1
disc = np.hypot(*list(np.mgrid[:size, :size] - fwhm_pix)) < fwhm_pix
mask = fftconvolve(mask, disc, mode='same')
mask = mask > 1e-9
return gamma, ndi_label(mask)[0]
def compute_deblended_segmap(
image, npixels=5, snr=3, dilate_size=11, maxiters=5, sigma=3, fwhm=3.0, kernelsize=5
):
"""Compute segmentation map using photutils.
The segmentation map is computed with the following steps:
- Creation of a mask of sources with the ``snr`` threshold, using
`photutils.make_source_mask`.
- Estimation of the background statistics with this mask
(`astropy.stats.sigma_clipped_stats`), to estimate a refined threshold
with ``median + sigma * rms``.
- Convolution with a Gaussian kernel.
- Creation of the segmentation image, using `photutils.detect_sources`.
- Deblending of the segmentation image, using `photutils.deblend_sources`.
Parameters
----------
image : mpdaf.obj.Image
The input image.
npixels : int
The number of connected pixels that an object must have to be detected.
snr, dilate_size :
See `photutils.make_source_mask`.
maxiters, sigma :
See `astropy.stats.sigma_clipped_stats`.
fwhm : float
Kernel size (pixels) for the PSF convolution.
kernelsize : int
Size of the convolution kernel.
Returns
-------
`~mpdaf.obj.Image`
The deblended segmentation map.
"""
from astropy.convolution import Gaussian2DKernel
from photutils import make_source_mask, detect_sources
data = image.data
mask = make_source_mask(data, snr=snr, npixels=npixels, dilate_size=dilate_size)
bkg_mean, bkg_median, bkg_rms = sigma_clipped_stats(
data, sigma=sigma, mask=mask, maxiters=maxiters
)
threshold = bkg_median + sigma * bkg_rms
logger = logging.getLogger(__name__)
logger.info(
'Background Median %.2f RMS %.2f Threshold %.2f', bkg_median, bkg_rms, threshold
)
sig = fwhm * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sig, x_size=kernelsize, y_size=kernelsize)
kernel.normalize()
segm = detect_sources(data, threshold, npixels=npixels, filter_kernel=kernel)
segm_deblend = phot_deblend_sources(
image, segm, npixels=npixels, filter_kernel=kernel, mode='linear'
)
return segm_deblend
def phot_deblend_sources(img, segmap, **kwargs):
"""Wrapper to catch warnings from deblend_sources."""
from photutils import deblend_sources
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
category=AstropyUserWarning,
message='.*contains negative values.*',
)
deblend = deblend_sources(img.data, segmap, **kwargs)
return Image(data=deblend.data, wcs=img.wcs, mask=img.mask, copy=False)
def createradvar(cu, ot):
"""Compute the compactness of areas using variance of position.
The variance is computed on the position given by adding one of the 'ot'
to 'cu'.
Parameters
----------
cu : 2D array
The current array
ot : 3D array
The other array
Returns
-------
var : array
The radial variances
"""
N = ot.shape[0]
out = np.zeros(N)
for n in range(N):
tmp = cu + ot[n, :, :]
y, x = np.where(tmp > 0)
r = np.sqrt((y - y.mean()) ** 2 + (x - x.mean()) ** 2)
out[n] = np.var(r)
return out
def fusion_areas(label, MinSize, MaxSize, option=None):
"""Function which merge areas which have a surface less than
MinSize if the size after merging is less than MaxSize.
The criteria of neighbor can be related to the minimum surface
or to the compactness of the output area
Parameters
----------
label : area
The labels of areas
MinSize : int
The size of areas under which they need to merge
MaxSize : int
        The size of areas above which they cannot merge
option : string
if 'var' the compactness criteria is used
if None the minimum surface criteria is used
Returns
-------
label : array
The labels of merged areas
"""
while True:
indlabl = np.argsort(np.sum(label, axis=(1, 2)))
tampon = label.copy()
for n in indlabl:
# if the label is not empty
cu = label[n, :, :]
cu_size = np.sum(cu)
if cu_size > 0 and cu_size < MinSize:
# search for neighbors
labdil = label[n, :, :].copy()
labdil = binary_dilation(labdil, iterations=1)
# only neighbors
test = np.sum(label * labdil[np.newaxis, :, :], axis=(1, 2)) > 0
indice = np.where(test == 1)[0]
ind = np.where(indice != n)[0]
indice = indice[ind]
                # loop over the candidates
ot = label[indice, :, :]
# test size of current with neighbor
if option is None:
test = np.sum(ot, axis=(1, 2))
elif option == 'var':
test = createradvar(cu, ot)
else:
raise ValueError('bad option')
if len(test) > 0:
# keep the min-size
ind = np.argmin(test)
cand = indice[ind]
if (np.sum(label[n, :, :]) + test[ind]) < MaxSize:
label[n, :, :] += label[cand, :, :]
label[cand, :, :] = 0
# clean empty area
ind = np.sum(label, axis=(1, 2)) > 0
label = label[ind, :, :]
tampon = tampon[ind, :, :]
if np.sum(tampon - label) == 0:
break
return label
@timeit
def area_segmentation_square_fusion(nexpmap, MinS, MaxS, NbSubcube, Ny, Nx):
"""Create non square area based on continuum test.
The full 2D image is first segmented in subcube. The area are fused in case
they are too small. Thanks to the continuum test, detected sources are
fused with associated area. The convex enveloppe of the sources inside each
area is then done. Finally all the convex enveloppe growth until using all
the pixels
Parameters
----------
nexpmap : 2D array
the active pixel of the image
MinS : int
The size of areas under which they need to merge
MaxS : int
        The size of areas above which they cannot merge
NbSubcube : int
Number of subcubes for the spatial segmentation
Nx : int
Number of columns
Ny : int
Number of rows
Returns
-------
label : array
label of the fused square
"""
# square area index with borders
Vert = np.sum(nexpmap, axis=1)
Hori = np.sum(nexpmap, axis=0)
y1 = np.where(Vert > 0)[0][0]
x1 = np.where(Hori > 0)[0][0]
y2 = Ny - np.where(Vert[::-1] > 0)[0][0]
x2 = Nx - np.where(Hori[::-1] > 0)[0][0]
start = (y1, x1)
inty, intx = spatial_segmentation(Nx, Ny, NbSubcube, start=start)
# % FUSION square AREA
label = []
for numy in range(NbSubcube):
for numx in range(NbSubcube):
y1, y2, x1, x2 = inty[numy + 1], inty[numy], intx[numx], intx[numx + 1]
tmp = nexpmap[y1:y2, x1:x2]
if np.mean(tmp) != 0:
labtest = ndi_label(tmp)[0]
labtmax = labtest.max()
for n in range(labtmax):
label_tmp = np.zeros((Ny, Nx))
label_tmp[y1:y2, x1:x2] = labtest == (n + 1)
label.append(label_tmp)
label = np.array(label)
return fusion_areas(label, MinS, MaxS)
@timeit
def area_segmentation_sources_fusion(labsrc, label, pfa, Ny, Nx):
"""Function to create non square area based on continuum test. Thanks
to the continuum test, detected sources are fused with associated area.
The convex enveloppe of the sources inside
each area is then done. Finally all the convex enveloppe growth until
using all the pixels
Parameters
----------
labsrc : array
segmentation map
label : array
label of fused square generated in area_segmentation_square_fusion
pfa : float
Pvalue for the test which performs segmentation
Nx : int
Number of columns
Ny : int
Number of rows
Returns
-------
label_out : array
label of the fused square and sources
"""
# compute the sources label
nlab = labsrc.max()
sources = np.zeros((nlab, Ny, Nx))
for n in range(1, nlab + 1):
sources[n - 1, :, :] = (labsrc == n) > 0
sources_save = sources.copy()
nlabel = label.shape[0]
nsrc = sources.shape[0]
for n in range(nsrc):
cu_src = sources[n, :, :]
# find the area in which the current source
# has bigger probability to be
test = np.sum(cu_src[np.newaxis, :, :] * label, axis=(1, 2))
if len(test) > 0:
ind = np.argmax(test)
# associate the source to the label
label[ind, :, :] = (label[ind, :, :] + cu_src) > 0
# mask other labels from this sources
mask = (1 - label[ind, :, :])[np.newaxis, :, :]
ot_lab = np.delete(np.arange(nlabel), ind)
label[ot_lab, :, :] *= mask
# delete the source
sources[n, :, :] = 0
return label, np.sum(sources_save, axis=0)
@timeit
def area_segmentation_convex_fusion(label, src):
"""Function to compute the convex enveloppe of the sources inside
each area is then done. Finally all the convex enveloppe growth until
using all the pixels
Parameters
----------
label : array
label containing the fusion of fused squares and sources
generated in area_segmentation_sources_fusion
src : array
label of estimated sources from segmentation map
Returns
-------
label_out : array
label of the convex
"""
label_fin = []
# for each label
for lab_n in range(label.shape[0]):
# keep only the sources inside the label
lab = label[lab_n, :, :]
data = src * lab
if np.sum(data > 0):
points = np.array(np.where(data > 0)).T
y_0 = points[:, 0].min()
x_0 = points[:, 1].min()
points[:, 0] -= y_0
points[:, 1] -= x_0
sny, snx = points[:, 0].max() + 1, points[:, 1].max() + 1
            # compute the convex envelope of a sub-part of the label
lab_temp = Convexline(points, snx, sny)
# in full size
label_out = np.zeros((label.shape[1], label.shape[2]))
label_out[y_0 : y_0 + sny, x_0 : x_0 + snx] = lab_temp
label_out *= lab
label_fin.append(label_out)
return np.array(label_fin)
def Convexline(points, snx, sny):
"""Function to compute the convex enveloppe of the sources inside
each area is then done and full the polygone
Parameters
----------
data : array
contain the position of source for one of the label
snx,sny: int,int
the effective size of area in the label
Returns
-------
lab_out : array
        The filled convex envelope corresponding to the sub-label
"""
    # convex envelope vertices
hull = ConvexHull(points)
xs = hull.points[hull.simplices[:, 1]]
xt = hull.points[hull.simplices[:, 0]]
sny, snx = points[:, 0].max() + 1, points[:, 1].max() + 1
tmp = np.zeros((sny, snx))
    # create the lines between vertices
for n in range(hull.simplices.shape[0]):
x0, x1, y0, y1 = xs[n, 1], xt[n, 1], xs[n, 0], xt[n, 0]
nx = np.abs(x1 - x0)
ny = np.abs(y1 - y0)
if ny > nx:
xa, xb, ya, yb = y0, y1, x0, x1
else:
xa, xb, ya, yb = x0, x1, y0, y1
if xa > xb:
xb, xa, yb, ya = xa, xb, ya, yb
indx = np.arange(xa, xb, dtype=int)
N = len(indx)
indy = np.array(ya + (indx - xa) * (yb - ya) / N, dtype=int)
if ny > nx:
tmpx, tmpy = indx, indy
indy, indx = tmpx, tmpy
tmp[indy, indx] = 1
radius = 1
dxy = 2 * radius
x = np.linspace(-dxy, dxy, 1 + (dxy) * 2)
y = np.linspace(-dxy, dxy, 1 + (dxy) * 2)
xv, yv = np.meshgrid(x, y)
r = np.sqrt(xv ** 2 + yv ** 2)
mask = np.abs(r) <= radius
# to close the lines
conv_lab = fftconvolve(tmp, mask, mode='same') > 1e-9
lab_out = conv_lab.copy()
for n in range(conv_lab.shape[0]):
ind = np.where(conv_lab[n, :] == 1)[0]
lab_out[n, ind[0] : ind[-1]] = 1
return lab_out
@timeit
def area_growing(label, mask):
"""Growing and merging of all areas
Parameters
----------
label : array
        label containing the convex envelope of each area
mask : array
mask of positive pixels
Returns
-------
label_out : array
        label of the convex envelope grown to the max number of pixels
"""
    # start with the smaller areas
set_ind = np.argsort(np.sum(label, axis=(1, 2)))
# closure horizon
niter = 20
label_out = label.copy()
nlab = label_out.shape[0]
while True:
s = np.sum(label_out)
for n in set_ind:
cu_lab = label_out[n, :, :]
ind = np.delete(np.arange(nlab), n)
ot_lab = label_out[ind, :, :]
border = (1 - (np.sum(ot_lab, axis=0) > 0)) * mask
            # closure in all cases + 1 dilation
cu_lab = binary_dilation(cu_lab, iterations=niter + 1)
cu_lab = binary_erosion(cu_lab, border_value=1, iterations=niter)
label_out[n, :, :] = cu_lab * border
if np.sum(label_out) == np.sum(mask) or np.sum(label_out) == s:
break
return label_out
@timeit
def area_segmentation_final(label, MinS, MaxS):
"""Merging of small areas and give index
Parameters
----------
label : array
        Label containing the convex envelope of each area
MinS : number
The size of areas under which they need to merge
MaxS : number
        The size of areas above which they cannot merge
Returns
-------
    areamap : array
        Map of the label index for each pixel.
"""
# if an area is too small
label = fusion_areas(label, MinS, MaxS, option='var')
# create label map
areamap = np.zeros(label.shape[1:])
for i in range(label.shape[0]):
areamap[label[i, :, :] > 0] = i + 1
return areamap
@timeit
def Compute_GreedyPCA_area(
NbArea, cube_std, areamap, Noise_population, threshold_test, itermax, testO2
):
"""Function to compute the PCA on each zone of a data cube.
Parameters
----------
NbArea : int
Number of area
cube_std : array
Cube data weighted by the standard deviation
areamap : array
Map of areas
Noise_population : float
Proportion of estimated noise part used to define the
background spectra
threshold_test : list
User given list of threshold (not pfa) to apply on each area, the
list is of length NbAreas or of length 1.
itermax : int
Maximum number of iterations
testO2 : list of arrays
Result of the O2 test
Returns
-------
    cube_faint : array
        Faint greedy decomposition of the STD cube
    mapO2 : array
        Map of the number of iterations per spaxel
    nstop : int
        Number of times the iterations were stopped at itermax
"""
cube_faint = cube_std.copy()
mapO2 = np.zeros(cube_std.shape[1:])
nstop = 0
area_iter = range(1, NbArea + 1)
if NbArea > 1:
area_iter = progressbar(area_iter)
for area_ind in area_iter:
# limits of each spatial zone
ksel = areamap == area_ind
# Data in this spatio-spectral zone
cube_temp = cube_std[:, ksel]
thr = threshold_test[area_ind - 1]
test = testO2[area_ind - 1]
cube_faint[:, ksel], mO2, kstop = Compute_GreedyPCA(
cube_temp, test, thr, Noise_population, itermax
)
mapO2[ksel] = mO2
nstop += kstop
return cube_faint, mapO2, nstop
def Compute_PCA_threshold(faint, pfa):
"""Compute threshold for the PCA.
Parameters
----------
faint : array
Standardized data.
pfa : float
PFA of the test.
Returns
-------
    test, histO2, frecO2, thresO2, mea, std
        Result of the O2 test and of the automatic threshold computation.
"""
test = O2test(faint)
# automatic threshold computation
histO2, frecO2, thresO2, mea, std = compute_thresh_gaussfit(test, pfa)
return test, histO2, frecO2, thresO2, mea, std
def Compute_GreedyPCA(cube_in, test, thresO2, Noise_population, itermax):
"""Function to compute greedy svd. thanks to the test (test_fun) and
according to a defined threshold (threshold_test) the cube is segmented
in nuisance and background part. A part of the background part
(1/Noise_population %) is used to compute a mean background, a signature.
The Nuisance part is orthogonalized to this signature in order to not
loose this part during the greedy process. SVD is performed on nuisance
in order to modelized the nuisance part and the principal eigen vector,
only one, is used to perform the projection of the whole set of data:
Nuisance and background. The Nuisance spectra which satisfied the test
are updated in the background computation and the background is so
cleaned from sources signature. The iteration stop when all the spectra
satisfy the criteria
Parameters
----------
    cube_in : array
        The 3D cube of clean data
    test : array
        Result of the O2 test on the input data
    thresO2 : float
        Threshold for the O2 test
Noise_population : float
Fraction of spectra estimated as background
itermax : int
Maximum number of iterations
Returns
-------
    faint : array
        cleaned cube
    mapO2 : array
        2D map filled with the number of iterations per spectrum
    nstop : int
        Number of times the iterations have been stopped when > itermax
"""
logger = logging.getLogger(__name__)
# nuisance part
pypx = np.where(test > thresO2)[0]
npix = len(pypx)
faint = cube_in.copy()
mapO2 = np.zeros(faint.shape[1])
nstop = 0
with progressbar(total=npix, miniters=0, leave=False) as bar:
# greedy loop based on test
nbiter = 0
while len(pypx) > 0:
nbiter += 1
mapO2[pypx] += 1
if nbiter > itermax:
nstop += 1
logger.warning('Warning iterations stopped at %d', nbiter)
break
# vector data
test_v = np.ravel(test)
test_v = test_v[test_v > 0]
nind = np.where(test_v <= thresO2)[0]
sortind = np.argsort(test_v[nind])
# at least one spectra is used to perform the test
nb = 1 + int(len(nind) / Noise_population)
# background estimation
b = np.mean(faint[:, nind[sortind[:nb]]], axis=1)
# cube segmentation
x_red = faint[:, pypx]
# orthogonal projection with background.
x_red -= orthogonal_projection(b, x_red)
x_red /= np.nansum(b ** 2)
# sparse svd if nb spectrum > 1 else normal svd
if x_red.shape[1] == 1:
break
                # If the PCA does not converge, or if a giant point source
                # survives in the faint cube, the reason will be here. In the
                # latter case, one could deactivate the mean subtraction when
                # computing the "mean_in_pca"; this would make the vector
                # which is above the threshold equal to the background. For
                # now we prefer to keep it, to stop the iteration earlier in
                # order to keep residual sources, under the hypothesis that
                # this spectrum is only slightly above the threshold (what we
                # observe in data).
U, s, V = np.linalg.svd(x_red, full_matrices=False)
else:
U, s, V = svds(x_red, k=1)
# orthogonal projection
faint -= orthogonal_projection(U[:, 0], faint)
# test
test = O2test(faint)
# nuisance part
pypx = np.where(test > thresO2)[0]
bar.update(npix - len(pypx) - bar.n)
bar.update(npix - len(pypx) - bar.n)
return faint, mapO2, nstop
def O2test(arr):
"""Compute the second order test on spaxels.
    The test estimates the background and nuisance parts of the data by
    means of a second-order test, testing the mean and the variance of the
    spectra at the same time.
Parameters
----------
arr : array-like
The 3D cube data to test.
Returns
-------
ndarray
result of the test.
"""
# np.einsum('ij,ij->j', arr, arr) / arr.shape[0]
return np.mean(arr ** 2, axis=0)
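# Illustrative sketch (not from the original source): the statistic is simply
# the mean of squares along the spectral axis; `_demo_O2test` is hypothetical.
def _demo_O2test():
    arr = np.full((5, 3), 2.0)
    assert np.allclose(O2test(arr), 4.0)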
def compute_thresh_gaussfit(data, pfa, bins='fd', sigclip=10):
"""Compute a threshold with a gaussian fit of a distribution.
Parameters
----------
data : array
2D data from the O2 test.
pfa : float
Desired false alarm.
    bins : str
        Method for computing bins (see numpy.histogram_bin_edges).
    sigclip : float
        Sigma value used to clip the data before the fit.
Returns
-------
histO2 : histogram value of the test
frecO2 : frequencies of the histogram
thresO2 : automatic threshold for the O2 test
mea : mean value of the fit
std : sigma value of the fit
"""
logger = logging.getLogger(__name__)
data = data[data > 0]
data = sigma_clip(data, sigclip)
data = data.compressed()
histO2, frecO2 = np.histogram(data, bins=bins, density=True)
ind = np.argmax(histO2)
mod = frecO2[ind]
ind2 = np.argmin((histO2[ind] / 2 - histO2[:ind]) ** 2)
fwhm = mod - frecO2[ind2]
sigma = fwhm / np.sqrt(2 * np.log(2))
coef = stats.norm.ppf(pfa)
thresO2 = mod - sigma * coef
logger.debug('1st estimation mean/std/threshold: %f/%f/%f', mod, sigma, thresO2)
x = (frecO2[1:] + frecO2[:-1]) / 2
g1 = Gaussian1D(amplitude=histO2.max(), mean=mod, stddev=sigma)
fit_g = LevMarLSQFitter()
xcut = g1.mean + gaussian_sigma_to_fwhm * g1.stddev / 2
ksel = x < xcut
g2 = fit_g(g1, x[ksel], histO2[ksel])
mea, std = (g2.mean.value, g2.stddev.value)
# make sure to return float, not np.float64
thresO2 = float(mea - std * coef)
return histO2, frecO2, thresO2, mea, std
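# Illustrative sketch (not from the original source): fit on synthetic,
# roughly gaussian data; `_demo_thresh_gaussfit` is a hypothetical name.
def _demo_thresh_gaussfit():
    rng = np.random.default_rng(2)
    data = rng.normal(5.0, 0.5, size=20000)
    histO2, frecO2, thres, mea, std = compute_thresh_gaussfit(data, pfa=0.05)
    # the fitted mean should land near the mode of the synthetic distribution
    assert np.isfinite(thres) and abs(mea - 5.0) < 1.0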
def _convolve_fsf(psf, cube, weights=None):
ones = np.ones_like(cube)
if weights is not None:
cube = cube * weights
ones *= weights
psf = np.ascontiguousarray(psf[::-1, ::-1])
psf -= psf.mean()
# build a weighting map per PSF and convolve
cube_fsf = fftconvolve(cube, psf, mode='same')
# Spatial part of the norm of the 3D atom
psf **= 2
norm_fsf = fftconvolve(ones, psf, mode='same')
return cube_fsf, norm_fsf
def _convolve_profile(Dico, cube_fft, norm_fft, fshape, n_jobs, parallel):
# Second cube of correlation values
dico_fft = fft.rfftn(Dico, fshape)[:, None] * cube_fft
cube_profile = _convolve_spectral(
parallel, n_jobs, dico_fft, fshape, func=fft.irfftn
)
dico_fft = fft.rfftn(Dico ** 2, fshape)[:, None] * norm_fft
norm_profile = _convolve_spectral(
parallel, n_jobs, dico_fft, fshape, func=fft.irfftn
)
norm_profile[norm_profile <= 0] = np.inf
np.sqrt(norm_profile, out=norm_profile)
cube_profile /= norm_profile
return cube_profile
def _convolve_spectral(parallel, nslices, arr, shape, func=fft.rfftn):
arr = np.array_split(arr, nslices, axis=-1)
out = parallel(delayed(func)(chunk, shape, axes=(0,)) for chunk in arr)
return np.concatenate(out, axis=-1)
@timeit
def Correlation_GLR_test(
cube, fsf, weights, profiles, nthreads=1, pcut=None, pmeansub=True
):
"""Compute the cube of GLR test values with the given PSF and
dictionary of spectral profiles.
Parameters
----------
cube : array
data cube
fsf : list of arrays
FSF for each field of this data cube
weights : list of array
Weight maps of each field
profiles : list of ndarray
Dictionary of spectral profiles to test
nthreads : int
number of threads
pcut : float
Cut applied to the profiles to limit their width
pmeansub : bool
Subtract the mean of the profiles
Returns
-------
correl : array
cube of T_GLR values of maximum correlation
profile : array
Number of the profile associated to the T_GLR
correl_min : array
cube of T_GLR values of minimum correlation
"""
logger = logging.getLogger(__name__)
Nz, Ny, Nx = cube.shape
# Spatial convolution of the weighted data with the zero-mean FSF
logger.info(
'Step 1/3 and 2/3: '
'Spatial convolution of weighted data with the zero-mean FSF, '
'Computing Spatial part of the norm of the 3D atoms'
)
if weights is None: # one FSF
fsf = [fsf]
weights = [None]
nfields = len(fsf)
fields = range(nfields)
if nfields > 1:
fields = progressbar(fields)
if nthreads != 1:
# copy the arrays because otherwise joblib's memmap handling fails
        # (maybe because of astropy.io.fits doing weird things with the memmap?)
cube = np.array(cube)
# Make sure that we have a float array in C-order because scipy.fft
# (new in v1.4) fails with Fortran ordered arrays.
cube = cube.astype(float)
with Parallel(n_jobs=nthreads) as parallel:
for nf in fields:
# convolve spatially each spectral channel by the FSF, and do the
# same for the norm (inverse variance)
res = parallel(
progressbar(
[
delayed(_convolve_fsf)(fsf[nf][i], cube[i], weights=weights[nf])
for i in range(Nz)
]
)
)
res = [np.stack(arr) for arr in zip(*res)]
if nf == 0:
cube_fsf, norm_fsf = res
else:
cube_fsf += res[0]
norm_fsf += res[1]
# First cube of correlation values
# initialization with the first profile
logger.info('Step 3/3 Computing second cube of correlation values')
# Prepare profiles:
# Cut the profiles and subtract the mean, if asked to do so
prof_cut = []
for prof in profiles:
prof = prof.copy()
if pcut is not None:
lpeak = prof.argmax()
lw = np.max(np.abs(np.where(prof >= pcut)[0][[0, -1]] - lpeak))
prof = prof[lpeak - lw : lpeak + lw + 1]
prof /= np.linalg.norm(prof)
if pmeansub:
prof -= prof.mean()
prof_cut.append(prof)
# compute the optimal shape for FFTs (on the wavelength axis).
# For profiles with different shapes, we need to know the indices to
# extract the signal from the inverse fft.
s1 = np.array(cube_fsf.shape) # cube shape
s2 = np.array([(d.shape[0], 1, 1) for d in prof_cut]) # profiles shape
fftshape = s1 + s2 - 1 # fft shape
fshape = [
fftpack.helper.next_fast_len(int(d)) # optimal fft shape
for d in fftshape.max(axis=0)[:1]
]
# and now computes the indices to extract the cube from the inverse fft.
startind = (fftshape - s1) // 2
endind = startind + s1
cslice = [slice(startind[k, 0], endind[k, 0]) for k in range(len(endind))]
# Compute the FFTs of the cube and norm cube, splitting them on multiple
# threads if needed
with Parallel(n_jobs=nthreads, backend='threading') as parallel:
cube_fft = _convolve_spectral(
parallel, nthreads, cube_fsf, fshape, func=fft.rfftn
)
norm_fft = _convolve_spectral(
parallel, nthreads, norm_fsf, fshape, func=fft.rfftn
)
cube_fsf = norm_fsf = res = None
cube_fft = cube_fft.reshape(cube_fft.shape[0], -1)
norm_fft = norm_fft.reshape(norm_fft.shape[0], -1)
profile = np.empty((Nz, Ny * Nx), dtype=np.uint8)
correl = np.full((Nz, Ny * Nx), -np.inf)
correl_min = np.full((Nz, Ny * Nx), np.inf)
    # for each profile, convolve the convolved cube and norm cube.
# Then for each pixel we keep the maximum correlation (and min correlation)
# and the profile number with the max correl.
with Parallel(n_jobs=nthreads, backend='threading') as parallel:
for k in progressbar(range(len(prof_cut))):
cube_profile = _convolve_profile(
prof_cut[k], cube_fft, norm_fft, fshape, nthreads, parallel
)
cube_profile = cube_profile[cslice[k]]
profile[cube_profile > correl] = k
np.maximum(correl, cube_profile, out=correl)
np.minimum(correl_min, cube_profile, out=correl_min)
profile = profile.reshape(Nz, Ny, Nx)
correl = correl.reshape(Nz, Ny, Nx)
correl_min = correl_min.reshape(Nz, Ny, Nx)
return correl, profile, correl_min
def compute_local_max(correl, correl_min, mask, size=3):
"""Compute the local maxima of the maximum correlation and local maxima
of minus the minimum correlation distribution.
Parameters
----------
correl : array
T_GLR values with edges excluded (from max correlation)
correl_min : array
T_GLR values with edges excluded (from min correlation)
mask : array
mask array (true if pixel is masked)
    size : int
        Size of the maximum filter (scalar or one value per axis).
Returns
-------
array, array
local maxima of correlations and local maxima of -correlations
"""
# local maxima of maximum correlation
if np.isscalar(size):
size = (size, size, size)
local_max = maximum_filter(correl, size=size)
local_mask = correl == local_max
local_mask[mask] = False
local_max *= local_mask
# local maxima of minus minimum correlation
minus_correl_min = -correl_min
local_min = maximum_filter(minus_correl_min, size=size)
local_mask = minus_correl_min == local_min
local_mask[mask] = False
local_min *= local_mask
return local_max, local_min
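# Illustrative self-check (not from the original source); the helper name
# `_demo_compute_local_max` is hypothetical.
def _demo_compute_local_max():
    rng = np.random.default_rng(1)
    correl = rng.normal(size=(4, 8, 8))
    mask = np.zeros(correl.shape, dtype=bool)
    lmax, _ = compute_local_max(correl, -correl, mask, size=3)
    # every non-zero voxel of lmax holds the correlation value of a local max
    assert np.all(lmax[lmax != 0] == correl[lmax != 0])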
def itersrc(cat, tol_spat, tol_spec, n, id_cu):
"""Recursive function to perform the spatial merging.
    If neighbors are spatially close to a line, they are merged. Then the
    neighbors of the seed are analysed: if they are close enough to the
    current line (a neighbor of the original seed), they are merged, but
    only if the frequency is close enough (surrogate); if the frequency is
    too different, they are rejected.
    If two lines (or a group of lines and a new line) are close enough,
    without a big spectral gap, and not in the same label (e.g. a group in
    the background close to one source inside a source label), the
    resulting ID is the ID of the source label and not of the background.
Parameters
----------
    cat : `astropy.table.Table`
        Catalog of the previously merged lines, holding the 3D position,
        area label and ID of all analysed lines.
tol_spat : int
spatial tolerance for the spatial merging
tol_spec : int
spectral tolerance for the spectral merging
    n : int
        Index of the detection currently processed.
    id_cu : int
        ID of the original seed.
"""
# compute spatial distance to other points.
# - id_cu is the detection processed at the start (from
# spatiospectral_merging), while n is the detection currently processed
# in the recursive call
matched = cat['matched']
spatdist = np.hypot(cat['x0'][n] - cat['x0'], cat['y0'][n] - cat['y0'])
spatdist[matched] = np.inf
cu_spat = np.hypot(cat['x0'][id_cu] - cat['x0'], cat['y0'][id_cu] - cat['y0'])
cu_spat[matched] = np.inf
ind = np.where(spatdist < tol_spat)[0]
if len(ind) == 0:
return
for indn in ind:
if not matched[indn]:
if cu_spat[indn] > tol_spat * np.sqrt(2):
# check spectral content
dz = np.sqrt((cat['z0'][indn] - cat['z0'][id_cu]) ** 2)
if dz < tol_spec:
cat[indn]['matched'] = True
cat[indn]['imatch'] = id_cu
itersrc(cat, tol_spat, tol_spec, indn, id_cu)
else:
cat[indn]['matched'] = True
cat[indn]['imatch'] = id_cu
itersrc(cat, tol_spat, tol_spec, indn, id_cu)
def spatiospectral_merging(tbl, tol_spat, tol_spec):
"""Perform the spatial and spatio spectral merging.
    The spectral merging gives the same ID to several groups of lines (from
    the spatial merging) if they share at least one line frequency.
Parameters
----------
tbl : `astropy.table.Table`
ID,x,y,z,...
tol_spat : int
spatial tolerance for the spatial merging
tol_spec : int
spectral tolerance for the spectral merging
Returns
-------
`astropy.table.Table`
Table: id, x, y, z, area, imatch, imatch2
imatch is the ID after spatial and spatio spectral merging.
imatch2 is the ID after spatial merging only.
"""
Nz = len(tbl)
tbl['_id'] = np.arange(Nz) # id of the detection
tbl['matched'] = np.zeros(Nz, dtype=bool) # is the detection matched ?
tbl['imatch'] = np.arange(Nz) # id of the matched detection
for row in tbl:
if not row['matched']:
row['matched'] = True
itersrc(tbl, tol_spat, tol_spec, row['_id'], row['_id'])
# renumber output IDs
for n, imatch in enumerate(np.unique(tbl['imatch'])):
# for detections in multiple segmap regions, set the max region
# number... this is needed to select all detections in the loop below
ind = tbl['imatch'] == imatch
tbl['area'][ind] = tbl['area'][ind].max()
tbl['imatch'][ind] = n
tbl.sort('imatch')
# Special treatment for segmap regions, merge sources with close
# spectral lines
tbl['imatch2'] = tbl['imatch'] # store result before spectral merging
iout = tbl['imatch']
zout = tbl['z0']
for n, area_cu in enumerate(np.unique(tbl['area'])):
if area_cu > 0:
# take all detections inside a segmap region
ind = np.where(tbl['area'] == area_cu)[0]
group_dep = np.unique(iout[ind])
for cu in group_dep:
group = np.unique(iout[ind])
if len(group) == 1: # if there is only one group remaining
break
if cu in group:
for otg in group:
if otg != cu:
zin = zout[iout == cu]
zot = zout[iout == otg]
difz = zin[np.newaxis, :].T - zot[np.newaxis, :]
if np.sqrt(difz ** 2).min() < tol_spec:
# if the minimum z distance is less than
# tol_spec, then merge the sources
iout[iout == otg] = cu
tbl.remove_columns(('_id', 'matched'))
return tbl
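# Illustrative sketch (not from the original source): two close detections
# merge, a distant one keeps its own ID; `_demo_merging` is hypothetical.
def _demo_merging():
    tbl = Table({'x0': [0, 1, 50], 'y0': [0, 1, 50],
                 'z0': [10, 12, 200], 'area': [1, 1, 1]})
    out = spatiospectral_merging(tbl, tol_spat=3, tol_spec=5)
    assert out['imatch'][0] == out['imatch'][1] != out['imatch'][2]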
@timeit
def Compute_threshold_purity(
purity, cube_local_max, cube_local_min, segmap=None, threshlist=None
):
"""Compute threshold values corresponding to a given purity.
Parameters
----------
purity : float
The target purity between 0 and 1.
cube_local_max : array
Cube of local maxima from maximum correlation.
cube_local_min : array
Cube of local maxima from minus minimum correlation.
segmap : array
Segmentation map to get the background regions.
threshlist : list
List of thresholds to compute the purity (default None).
Returns
-------
threshold : float
The estimated threshold associated to the purity.
res : astropy.table.Table
Table with the purity results for each threshold:
- PVal_r : The purity function
- index_pval : index value to plot
- Det_m : Number of detections (-DATA)
- Det_M : Number of detections (+DATA)
"""
logger = logging.getLogger(__name__)
# total number of spaxels
L1 = np.prod(cube_local_min.shape[1:])
# background only
if segmap is not None:
segmask = segmap == 0
cube_local_min = cube_local_min * segmask
# number of spaxels considered for calibration
L0 = np.count_nonzero(segmask)
logger.info('using only background pixels (%.1f%%)', L0 / L1 * 100)
else:
L0 = L1
if threshlist is None:
threshmax = min(cube_local_min.max(), cube_local_max.max())
threshmin = np.median(np.amax(cube_local_max, axis=0)) * 1.1
        threshlist = np.linspace(threshmin, threshmax, 50)
"""Generic feature utils for various purposes.
There are a few main things in this module:
- `read_and_join_features()`: a function to read various feature files and concatenate them
- `FastClassifier`: a class to do fast interactive classification of items based on exemplar SVMs
- If you run this module, it starts a server that provides a simple API to do fast interactive
classification. You should run it in a directory which has a `static` subdirectory, containing css
and js files with certain names (see command line args for more).
The API has the following endpoints:
- / : Loads a simple blank html page with a div with id "main", react, and given css and js files
- /classify : You can GET this with args 'pos' and 'neg', which should be comma-separated keys of
the items to use for training the classifier (negatives are optional). It returns a JSON object
with 'status' (either 'ok' or 'error', and if ok, then 'cls' which contains pairs of (key,
  score) of matching items. Nothing is filtered out of the matches (e.g. the positives used to
  train it), so you have to do that yourself.
- /static : A simple static file handler for everything in the "static" directory
"""
import json
import logging
import multiprocessing as mp
import os
import re
import time
from argparse import ArgumentParser
from collections import Counter, defaultdict
from os.path import exists
from random import sample
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple
import numpy as np # type: ignore
import tornado.ioloop
import tornado.web
from tqdm import tqdm # type: ignore
from gensim.models import KeyedVectors # type: ignore
from numpy.random import default_rng
from scipy.spatial.distance import cdist, euclidean
# from sklearn.utils.testing import ignore_warnings
from PIL import Image # type: ignore
from sklearn.exceptions import ConvergenceWarning # type: ignore
from sklearn.linear_model import SGDClassifier # type: ignore
from sklearn.preprocessing import normalize # type: ignore
from tornado.web import HTTPError, RequestHandler, StaticFileHandler
def show_times(times: List[float]) -> None:
"""Shows times in a pretty-printed way"""
logging.info(
"times: %s = %0.3fs",
"+".join("%0.2fs" % (t1 - t0) for t0, t1 in zip(times, times[1:])),
times[-1] - times[0],
)
def read_and_join_features(
feature_paths: Sequence[str],
key_func=lambda s: s.strip().replace("\\n", ""),
max_features=-1,
) -> Tuple[List[str], np.ndarray, Dict[str, List[str]]]:
"""Reads multiple `feature_paths` and joins them together.
Returns `(keys, features, key_to_item)`, where:
- `keys` is a list of keys
- `features` is a 2-d numpy array, where each row corresponds to a `key`
- `key_to_item` is a dict from key to an item dict. This dict contains:
- `paths`: the extracted original path for each key, for each feature path
- any other attributes in the input data files:
- for gensim inputs, we use the `get_vecattr()` interface to get attributes
- for npz files, we look for any other arrays of the same length as `features` in the input,
and use those
The extension is used to determine what format to read:
`.wv` or `.kv`: Assumed to be in gensim's KeyedVector format
`.npz`: Assumed to be an npz file, with 'paths' and 'features' fields.
All features are concatenated together, and only those keys where all inputs gave a vector are
used.
The paths (i.e., keys in .wv files, or the 'path' fields in .npz files) are converted to keys
using the given `key_func`. These should be unique per path!
If max_features > 0, then we limit to that many features
"""
key_to_row = defaultdict(list)
key_to_item = defaultdict(dict)
for n, feature_path in enumerate(feature_paths):
logging.info(
"Reading features from file %d/%d: %s", n + 1, len(feature_paths), feature_path
)
def add_row(path, row, attrs):
"""Adds the given feature `row` for `path`, with optional `attrs`"""
key = key_func(path)
# if we already have one set of features, then this key must already be in there
if n > 0 and key not in key_to_row:
return
key_to_row[key].append(row)
item = key_to_item[key]
if "paths" not in item:
item["paths"] = []
item["paths"].append(path)
for attr, value in attrs.items():
if attr == "id":
attr = "_id"
item[attr] = value
if feature_path.endswith(".wv") or feature_path.endswith(".kv"):
wv = KeyedVectors.load(feature_path, mmap="r")
attr_fields = sorted(wv.expandos)
logging.info(
" Read %d wv, attrs: %s, %s", len(wv), sorted(wv.expandos), wv.index_to_key[:10]
)
for path in wv.index_to_key:
attrs = {field: wv.get_vecattr(path, field) for field in attr_fields}
add_row(path, wv[path], attrs)
if max_features > 0 and len(key_to_row) >= max_features:
break
elif feature_path.endswith(".npz"):
data = np.load(feature_path)
paths = [str(path) for path in data["paths"]]
features = data["features"]
attrs_by_field = {}
for field in data:
if field in ("paths", "features"):
continue
try:
if len(data[field]) == len(features):
attrs_by_field[field] = data[field]
except Exception: # field that doesn't have len()
pass
for idx, (path, row) in enumerate(zip(paths, features)):
attrs = {field: attrs_by_field[field][idx] for field in attrs_by_field}
add_row(path, row, attrs)
if max_features > 0 and len(key_to_row) >= max_features:
break
else:
raise NotImplementedError(
"Do not know how to deal with this filetype: %s" % (feature_path)
)
# merge all features together
features = []
for key, lst in key_to_row.items():
if len(lst) == len(feature_paths):
features.append((key, np.hstack(lst)))
if not features:
logging.warning("No valid features found!")
return None
keys, features = zip(*features)
features = np.vstack(features)
logging.info("Got %d keys and features of shape %s", len(keys), features.shape)
key_to_item = dict(key_to_item)
for key, item in key_to_item.items():
key_to_item[key] = dict(item)
return keys, features, key_to_item
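# Illustrative usage sketch (not from the original module): build a tiny .npz
# in the expected format and read it back; all names here are hypothetical
# and the file is written to the system temp directory.
def _demo_read_and_join_features():
    import tempfile
    path = os.path.join(tempfile.gettempdir(), "feats_demo.npz")
    np.savez(path, paths=np.array(["a.jpg", "b.jpg"]),
             features=np.eye(2, dtype=np.float32), label=np.array([0, 1]))
    keys, feats, items = read_and_join_features([path])
    assert feats.shape == (2, 2) and items["a.jpg"]["label"] == 0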
class FastClassifier:
"""Wrapper class for a fast classifier that uses pre-computed features"""
def __init__(
self,
feature_paths: List[str],
sqrt_normalize=False,
l2_normalize=False,
n_models=4,
n_top=500,
n_negatives=50,
key_func_str=None,
max_features=-1,
filter_regexps=None,
):
"""Loads the data and preprocesses it.
Reads all `feature_paths` and concatenates features from each.
The features are then optionally run through `sqrt_normalize` and/or `l2_normalize` if requested.
The workings of the classifier are based on the following parameters:
- n_models: The number of individual models to train
- n_top: The number of top results from each individual classifier to use
- n_negatives: The number of random negatives to use for each classifier
You can optionally pass in a 'key_func_str', which is eval'ed to get the key func
"""
t0 = time.time()
if not key_func_str:
key_func_str = "path"
def key_func(path):
return eval(key_func_str)
keys, features, key_to_item = read_and_join_features(
feature_paths,
key_func=key_func,
max_features=max_features,
)
# apply filter regexps
if filter_regexps:
logging.info(
"Initially had %d keys, %s features, %d items",
len(keys),
features.shape,
len(key_to_item),
)
to_keep = set()
# check each item key and each item's values for each filter regexp
for key, item in key_to_item.items():
for regexp in filter_regexps:
regexp = re.compile(regexp)
if regexp.search(key):
break
matched_field = False
for field, value in item.items():
if regexp.search(str(value)):
logging.debug(f"matched {key} {regexp} {field}={value}")
matched_field = True
break
if matched_field:
break
else: # none of the regexps matched, so keep it
to_keep.add(key)
# now do the filtering
key_to_item = {key: item for key, item in key_to_item.items() if key in to_keep}
keys, features = zip(
*[(key, vec) for key, vec in zip(keys, features) if key in to_keep]
)
features = np.array(features)
logging.info(
"Left with %d keys, %s features, %d items",
len(keys),
features.shape,
len(key_to_item),
)
self.paths = [key_to_item[key]["paths"][0] for key in keys]
if sqrt_normalize:
logging.info("Applying SQRT norm")
features = np.sqrt(features)
if l2_normalize:
logging.info("Applying L2 normalization")
features = normalize(features, norm="l2", axis=1)
self.features_by_key = {key: feature for key, feature in zip(keys, features)}
# in our full list of features, we add padding dimension for fast dot products
self.features = np.hstack([features, np.ones((len(keys), 1))])
logging.debug(
"Pre: %s, post: %s, %s, %s",
features.shape,
self.features.shape,
features,
self.features,
)
logging.info(
"Loaded fast classifier from %d feature paths with %d keys in %0.2fs",
len(feature_paths),
len(keys),
time.time() - t0,
)
# now save other key variables
self.keys, self.key_to_item = keys, key_to_item
self.n_models = n_models
self.n_top = n_top
self.n_negatives = n_negatives
        self.rng = default_rng(0)
# -*- coding: utf-8 -*-
"""
Compute kcore and avg cascade length
Extract the train set for INFECTOR
"""
import igraph as ig
import time
import pandas as pd
import json
import numpy as np
from datetime import datetime
def sort_papers(papers):
"""
    # Sort a MAG diffusion cascade, which is a list of papers and their authors, in order of the papers' dates
"""
x =list(map(int,list(map(lambda x:x.split()[-1],papers))))
return [papers[i].strip() for i in np.argsort(x)]
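# Illustrative self-check (not from the original source); the helper name
# `_demo_sort_papers` is hypothetical.
def _demo_sort_papers():
    papers = ["a1 a2 2005", "b1 2003", "c1 c2 2004"]
    assert sort_papers(papers) == ["b1 2003", "c1 c2 2004", "a1 a2 2005"]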
def remove_duplicates(cascade_nodes,cascade_times):
"""
    # Some tweets have more than one retweet from the same person
    # Keep only the first retweet of that person
"""
duplicates = set([x for x in cascade_nodes if cascade_nodes.count(x)>1])
for d in duplicates:
to_remove = [v for v,b in enumerate(cascade_nodes) if b==d][1:]
cascade_nodes= [b for v,b in enumerate(cascade_nodes) if v not in to_remove]
cascade_times= [b for v,b in enumerate(cascade_times) if v not in to_remove]
return cascade_nodes, cascade_times
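# Illustrative self-check (not from the original source); the helper name
# `_demo_remove_duplicates` is hypothetical.
def _demo_remove_duplicates():
    nodes, times = remove_duplicates(["u1", "u2", "u1"], [0.0, 1.0, 2.0])
    assert nodes == ["u1", "u2"] and times == [0.0, 1.0]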
def store_samples(fn,cascade_nodes,cascade_times,initiators,train_set,op_time,sampling_perc=120):
"""
# Store the samples for the train set as described in the node-context pair creation process for INFECTOR
"""
#---- Inverse sampling based on copying time
#op_id = cascade_nodes[0]
no_samples = round(len(cascade_nodes)*sampling_perc/100)
casc_len = len(cascade_nodes)
#times = [op_time/(abs((cascade_times[i]-op_time))+1) for i in range(0,len(cascade_nodes))]
times = [1.0/(abs((cascade_times[i]-op_time))+1) for i in range(0,casc_len)]
s_times = sum(times)
if s_times==0:
samples = []
else:
print("Saved in train_set")
probs = [float(i)/s_times for i in times]
samples = np.random.choice(a=cascade_nodes, size=int(no_samples), p=probs)
#----- Store train set
if(fn=="mag"):
for op_id in initiators:
for i in samples:
#---- Write inital node, copying node,length of cascade
train_set.write(str(op_id)+","+i+","+str(casc_len)+"\n")
else:
op_id = initiators[0]
for i in samples:
#if(op_id!=i):
#---- Write initial node, copying node, copying time, length of cascade
train_set.write(str(op_id)+","+i+","+str(casc_len)+"\n")
def run(fn,sampling_perc,log):
print("Reading the network")
g = ig.Graph.Read_Ncol(fn+"/"+fn+"_network.txt")
vs = ig.VertexSeq(g)
# in mag it is undirected
if fn =="mag":
g.to_undirected()
f = open(fn+"/Init_Data/train_cascades.txt","r")
train_set = open(fn+"/train_set.txt","w")
#----- Initialize features
idx = 0
deleted_nodes = []
g.vs["Cascades_started"] = 0
g.vs["Cumsize_cascades_started"] = 0
g.vs["Cascades_participated"] = 0
log.write(" net:"+fn+"\n")
start_t = 0 #int(next(f))
idx=0
if(fn=="mag"):
start_t = int(next(f))
start = time.time()
#---------------------- Iterate through cascades to create the train set
for line in f:
if(fn=="mag"):
parts = line.split(";")
initiators = parts[0].replace(",","").split(" ")
op_time = int(initiators[-1])+start_t
initiators = initiators[:-1]
papers = parts[1].replace("\n","").split(":")
papers = sort_papers(papers)
papers = [list(map(lambda x: x.replace(",",""),i)) for i in list(map(lambda x:x.split(" "),papers))]
#---- Extract the authors from the paper list
flatten = []
for i in papers:
flatten = flatten+i[:-1]
u,i = np.unique(flatten,return_index=True)
            cascade_nodes = list(u[np.argsort(i)])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 14:55:00 2020
@author: rdamseh
"""
import numpy as np
from tqdm import tqdm
class CreateCylinderMappings:
'''
    This class creates 3D maps based on oriented cylinders built at each
    graph edge.
def __init__(self, g, to_return=['binary',
'velocity',
'so2',
'hct',
'gradient',
'propagation']):
self.g=g
self.GetImSize()
# set the needed outputs
self.tags={'binary':1,
'velocity':0,
'so2':0,
'hct':0,
'gradient':0,
'propagation':0}
for i in to_return:
self.tags[i]=1
def GetImSize(self):
# shift graph geometry to start from zero coordinates
# and
# set min radius to 2.0
min_rad=2.0
pos=np.array(self.g.GetNodesPos())
pos=pos-np.min(pos, axis=0)[None, :]
rad=np.array(self.g.GetRadii())
rad[rad<min_rad]=min_rad
maxr=np.max(rad)
for i, p, r in zip(self.g.GetNodes(), pos, rad):
self.g.node[i]['pos']=p+maxr
self.g.node[i]['r']=r
# get image size to be constructed
real_s = np.max(pos, axis=0) # real image size
new_s=real_s
new_s=tuple((np.ceil(new_s+(2*maxr))).astype(int)) # image size after padding
print('Image size: '+str(new_s))
self.real_s = real_s
self.new_s = new_s
self.niter = self.g.number_of_edges()
def cylinder(self, direction, radius, length):
'''
        Create an image cylinder
'''
r=length+2*radius
r=int(r)
#print('r value', r)
xrange, yrange, zrange = np.meshgrid(np.arange(-r, r+1),
np.arange(-r, r+1),
np.arange(-r, r+1), indexing='ij')
size=np.shape(xrange)
direction=direction.astype(float)
va=np.sqrt((direction**2).sum())
vnorm=direction/va
p=np.array([xrange.ravel(), yrange.ravel(), zrange.ravel()]).T
p=p.astype(float)
amp=np.sqrt(np.sum(p**2, axis=1))
amp[amp<1]=1
cos=np.abs(np.sum(p*vnorm, axis=1)/amp)
cos[cos>1]=1
sin=np.sqrt(1-cos**2)
shape0=(amp*sin)<radius # radius constrain
shape1=(amp*cos<length) # length constrain
a1=amp*cos-length
a2=amp*sin
shape2=(((a1**2+a2**2)**0.5)<(radius)) # rounded end constrain
shape=shape0*(shape2+shape1)
shape=np.reshape(shape, xrange.shape)
c0 = np.where(shape)
dot=np.sum(p*vnorm, axis=1)
dot=((dot-dot.min())/(dot.max()-dot.min()))
shape=shape*dot.reshape(shape.shape)
return c0, size
def get_cylinder_infos(self, g, radius_scaling=None):
info=dict()
if self.tags['binary']:
e=g.GetEdges()
pos1=np.array([g.node[i[0]]['pos'] for i in e])
pos2=np.array([g.node[i[1]]['pos'] for i in e])
radius1=np.array([g.node[i[0]]['r'] for i in e])
radius2=np.array([g.node[i[1]]['r'] for i in e])
radius=(radius1+radius2)/2.0# radius
if radius_scaling is not None:
radius*=radius_scaling
info['pos1']=pos1
info['pos2']=pos2
info['radius']=radius
vec=pos2-pos1
vec_amp=np.sqrt(np.sum(vec**2, axis=1))# norm
vec_amp[vec_amp==0]=1.0 # avoid divide by zero
vec_norm=vec/vec_amp[:, None]
        # for edges of length < 2, set the length to 2 to avoid disconnected maps
vec_amp[vec_amp<2.0]=2.0
info['vec_amp']=vec_amp
info['vec_norm']=vec_norm
if self.tags['so2']:
so21=np.array([g.node[i[0]]['so2'] for i in e])
so22=np.array([g.node[i[1]]['so2'] for i in e])
info['so21']=so21
info['so22']=so22
if self.tags['hct']:
types=np.array([g.node[i[0]]['type'] for i in e])
if types.max()==3: types-=1 # types should be 0-->Art., 1-->Vein, 2-->Capp
info['types']=types
if self.tags['velocity']:
velocity=np.array([g.node[i[0]]['velocity'] for i in e])
dx=np.array([g.node[i[0]]['dx'] for i in e])
dy=np.array([g.node[i[0]]['dy'] for i in e])
dz=np.array([g.node[i[0]]['dz'] for i in e])
info['velocity']=velocity
info['dx']=dx
info['dy']=dy
info['dz']=dz
if self.tags['propagation']:
try:
label1=np.array([g.node[i[0]]['label'] for i in e])
label2=np.array([g.node[i[1]]['label'] for i in e])
info['label1']=label1
info['label2']=label2
except:
print('--Cannot return \'propagation\'; no labels on input graph!')
self.tags['propagation']=False
return info
def GetOutput(self,
radius_scaling=None,
hct_values=[0.33, 0.44, 0.44]):
'''
        Input:
            radius_scaling: Factor used to increase/decrease the overall radius size
            hct_values: A list in the format [hct_in_arterioles, hct_in_venules, hct_in_capillaries]
'''
info = self.get_cylinder_infos(self.g, radius_scaling=radius_scaling)
real_s, new_s, niter = self.real_s, self.new_s, self.niter
if self.tags['binary']:
            binary_image = np.zeros(new_s)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
from moviepy.editor import VideoFileClip
import glob
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = []
# average x values of the fitted line over the last n iterations
self.bestx = None
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# radius of curvature of the line in some units
self.radius_of_curvature = None
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0, 0, 0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
def fit_poly(img_shape, leftx, lefty, rightx, righty):
### TO-DO: Fit a second order polynomial to each with np.polyfit() ###
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
### TO-DO: Calc both polynomials using ploty, left_fit and right_fit ###
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
return left_fit, right_fit, left_fitx, right_fitx, ploty
def search_around_poly(binary_warped, left_fit, right_fit):
# HYPERPARAMETER
# Choose the width of the margin around the previous polynomial to search
# The quiz grader expects 100 here, but feel free to tune on your own!
margin = 100
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
### TO-DO: Set the area of search based on activated x-values ###
### within the +/- margin of our polynomial function ###
### Hint: consider the window areas for the similarly named variables ###
### in the previous quiz, but change the windows to our new search area ###
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials
left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(
binary_warped.shape, leftx, lefty, rightx, righty)
# Fit new polynomials (in the real world)
left_fit_cr, right_fit_cr, left_fitx_cr, right_fitx_cr, ploty_cr = fit_poly(
binary_warped.shape, leftx * xm_per_pix, lefty * ym_per_pix, rightx * xm_per_pix, righty * ym_per_pix)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curverad = (
(1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = (
(1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
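# For reference: for a second-order fit x = A*y**2 + B*y + C, the expression above is
#   R = (1 + (2*A*y + B)**2)**1.5 / |2*A|
# evaluated in pixel units. The meter-space fits computed above (left_fit_cr,
# right_fit_cr) would be substituted here, with y_eval scaled by ym_per_pix, to get
# physical radii of curvature; a sketch, not part of the original source:
#   left_curverad_m = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix +
#                            left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])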
import json
import os
import os.path
import cv2
import numpy as np
import torch
import torch.utils.data as data_utl
from tqdm import tqdm
from dataset.vidor import VidOR
from frames import extract_all_frames
def video_to_tensor(pic):
"""Convert a ``numpy.ndarray`` to tensor.
Converts a numpy.ndarray (T x H x W x C)
to a torch.FloatTensor of shape (C x T x H x W)
Args:
pic (numpy.ndarray): Video to be converted to tensor.
Returns:
Tensor: Converted video.
"""
return torch.from_numpy(pic.transpose([3, 0, 1, 2]))
def load_rgb_frames(video_path, image_dir, begin, end, extract_frames=False):
"""
:param video_path: path to the video; only needed when extracting frames, which can take a long time!
:param image_dir: the image directory (not the same directory used by the extract-frames function)
:param begin:
:param end:
:param extract_frames:
:return:
"""
frames = []
video_path_splits = video_path.split('/')
image_dir_path = os.path.join(image_dir, video_path_splits[-2], video_path_splits[-1][:-4])
if extract_frames:
# Be careful! This step will take a long time!
extract_all_frames(video_path, image_dir_path)
for i in range(begin, end):
img_path = os.path.join(image_dir_path, str(i).zfill(6) + '.jpg')
if os.path.exists(img_path):
img = cv2.imread(img_path)[:, :, [2, 1, 0]]
h, w, c = img.shape  # numpy image shape is (rows, cols, channels)
if w < 226 or h < 226:
d = 226. - min(w, h)
sc = 1 + d / min(w, h)
img = cv2.resize(img, dsize=(0, 0), fx=sc, fy=sc)
img = (img / 255.) * 2 - 1
frames.append(img)
else:
if len(frames) >= 1:
frames.append(frames[-1])
# finally, pad the front of the frames list back to the requested length
for miss_frame in range(end - begin - len(frames)):
frames.insert(0, frames[0])
return np.asarray(frames, dtype=np.float32)
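# Minimal usage sketch (the paths and frame range are hypothetical, not from the
# original module):
# frames = load_rgb_frames('/data/vidor/0000/2401075277.mp4', '/data/frames', 1, 65)
# clip = video_to_tensor(frames)  # float tensor of shape (C x T x H x W)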
import numpy as np
import pandas
import pytest
import torch
from common import assert_close
from millipede import NormalLikelihoodSampler, NormalLikelihoodVariableSelector
from millipede.util import namespace_to_numpy, stack_namespaces
@pytest.mark.parametrize("precompute_XX", [False, True])
@pytest.mark.parametrize("prior", ["gprior", "isotropic"])
@pytest.mark.parametrize("include_intercept", [True, False])
@pytest.mark.parametrize("variable_S_X_assumed", [(False, False), (True, True)])
@pytest.mark.parametrize("device", ["gpu", "cpu"])
def test_linear_correlated(device, prior, precompute_XX, include_intercept, variable_S_X_assumed,
N=128, P=16, intercept=2.34, T=4000, T_burnin=200, report_frequency=1100, seed=1):
if device == "gpu" and not torch.cuda.is_available():
return
variable_S, X_assumed = variable_S_X_assumed
torch.manual_seed(seed)
X = torch.randn(N, P).double()
Z = torch.randn(N).double()
X[:, 0:2] = Z.unsqueeze(-1) + 0.001 * torch.randn(N, 2).double()
Y = Z + 0.05 * torch.randn(N).double()
if include_intercept:
Y += intercept
X_assumed = torch.randn(N, 2).double() if X_assumed else None
if X_assumed is not None:
Y += 0.5 * X_assumed[:, -1]
S = 1.0 if not variable_S else (0.25, 0.25 * P - 0.25)
samples = []
if device == "cpu":
sampler = NormalLikelihoodSampler(X, Y, X_assumed=X_assumed,
precompute_XX=precompute_XX, prior=prior,
compute_betas=True, S=S, nu0=0.0, lambda0=0.0,
tau=0.01, c=100.0, include_intercept=include_intercept,
tau_intercept=1.0e-4)
elif device == "gpu":
sampler = NormalLikelihoodSampler(X.cuda(), Y.cuda(),
X_assumed=X_assumed.cuda() if X_assumed is not None else None,
precompute_XX=precompute_XX, prior=prior,
compute_betas=True, S=S, nu0=0.0, lambda0=0.0,
tau=0.01, c=100.0, include_intercept=include_intercept,
tau_intercept=1.0e-4)
for t, (burned, s) in enumerate(sampler.mcmc_chain(T=T, T_burnin=T_burnin, seed=seed)):
if burned:
samples.append(namespace_to_numpy(s))
samples = stack_namespaces(samples)
weights = samples.weight / samples.weight.sum()
pip = np.dot(samples.add_prob.T, weights)
import os
import numpy as np
from subprocess import Popen, PIPE
from scipy.interpolate import griddata
import _pickle as cPickle
from sklearn.neighbors import KDTree
from Bio.PDB import *
from Bio.SeqUtils import seq1, seq3
from Bio.Seq import Seq
from Bio import SeqIO
from default_config.bin_path import bin_path
from default_config.dir_options import dir_opts
from compute_surface.protonate import protonate
from compute_surface.extractPDB import extractPDB
from compute_surface.extract_xyzrn import extract_xyzrn
from compute_surface.apply_msms import computeMSMS
from features.Hydrophobicity import kd_scale
from features.pKa import pKa_scale
from features.macc import macc
from features.Physicochemical import li_scale
class RBP():
def __init__(self, protein_name):
self.pdb_id, self.chain = protein_name.split('_') #complex, protein, rna
#download pdb, call Reduce and MSMS
self._download_pdb()
self.model = self._load_pdb()
self.RNA_space, self.RNA_chain = self._get_RNA_space()
self.vertices, self.vert_info = self._get_surface() #ndarray (n*3), list(n) A_19_x_VAL_HG11
#Extract the sequence of protien
self.seq, self.index2resid, self.resid2Residue = self._get_pdb_seq(self.chain, kd_scale)
#Get the coordinates of atom of all RNAs in complex
self.geometric_center = self._get_geometric_center()
#Get res_id on the surface
self.surface_res = self._get_surface_res()
#Get surface center of each residue to calculate the sampling density on the surface.
self.res_to_vertice = self._get_res_to_vertice()
#Calculate the label of each surface_res
self.label, self.all_label = self._get_label()
def _download_pdb(self):
if not os.path.exists(dir_opts['raw_pdb_dir']):
os.makedirs(dir_opts['raw_pdb_dir'])
pdbl = PDBList()
pdbl.retrieve_pdb_file(self.pdb_id, pdir=dir_opts['raw_pdb_dir'], file_format='mmCif')
def _get_surface(self):
extractPDB(self.pdb_id + '_' + self.chain)
protonate(self.pdb_id+'_'+self.chain)
extract_xyzrn(self.pdb_id, self.chain)
vertices, faces1, normalv1, vert_info = computeMSMS(self.pdb_id, self.chain)
return vertices, vert_info
def _load_pdb(self):
pdb_file = os.path.join(dir_opts['raw_pdb_dir'], self.pdb_id+'.cif')
parser = MMCIFParser(QUIET=True)
struct = parser.get_structure(self.pdb_id, pdb_file)
model = Selection.unfold_entities(struct, "M")[0]
return model
def _get_pdb_seq(self, chain, scale):
chain = self.model.child_dict[chain]
res_seq = ''
index2resid = {}
resid2structure = {}
index = 0
for Residue in chain.child_list:
if Residue.get_resname() in scale.keys():
res_seq += (seq1(Residue.get_resname()))
index2resid[index] = Residue.get_id()[1]
resid2structure[Residue.get_id()[1]] = Residue
index += 1
return res_seq, index2resid, resid2structure
def _get_RNA_space(self):
RNA_list = ["A", "C", "G", "U"]
RNA_chain = set()
atom_list = []
for chain in self.model.child_list:
if chain.id == self.chain:
continue
for res in chain.child_list:
atom_type = res.resname.strip()
if atom_type in RNA_list:
for atom in res:
atom_list.append(atom.coord)
RNA_chain.add(chain.id)
chain = '_'.join(RNA_chain)
return atom_list, chain
def _get_label(self):
rna_tree = KDTree(self.RNA_space)
label_dict = {}
for res_id in self.surface_res:
res = self.resid2Residue[res_id]
res_coord = []
for atom in res.child_list:
res_coord.append(atom.coord)
d, t = rna_tree.query(res_coord)
if np.min(d) < 5.0:
label_dict[res_id] = 1
else:
label_dict[res_id] = 0
label_all = []
for index in range(len(self.seq)):
if self.index2resid[index] in self.surface_res:
label_all.append(label_dict[self.index2resid[index]])
else:
label_all.append(0)
return label_dict, np.array(label_all)
"""
=============================================================
Angle-based Joint and Individual Variation Explained (AJIVE)
=============================================================
Adopted from the code at https://github.com/idc9/py_jive and tutorial
written by:
Author: <NAME>
License: MIT License
<blockquote>
[1] Lock, <NAME>., et al. “Joint and Individual Variation Explained (JIVE)
for Integrated Analysis of Multiple Data Types.” The Annals of Applied
Statistics, vol. 7, no. 1, 2013, pp. 523–542., doi:10.1214/12-aoas597.
</blockquote>
AJIVE is a useful algorithm that decomposes multiple views of data into two
main pieces
- Joint Variation
- Individual Variation
whose sum is the original data minus noise. This notebook will demonstrate the
functionality of AJIVE and show some examples of the algorithm's usefulness.
"""
import numpy as np
from mvlearn.decomposition import AJIVE
import seaborn as sns
import matplotlib.pyplot as plt
##############################################################################
# ## Data Creation
#
# Here we create data in the same way detailed in the initial JIVE paper:
#
# The two views are created with shared joint variation, unique individual
# variation, and independent noise.
#
np.random.seed(12)
# First View
X1_joint = np.vstack([-1 * np.ones((10, 20)), np.ones((10, 20))])
X1_joint = np.hstack([np.zeros((20, 80)), X1_joint])
X1_indiv_t = np.vstack([
np.ones((4, 50)),
-1 * np.ones((4, 50)),
np.zeros((4, 50)),
np.ones((4, 50)),
-1 * np.ones((4, 50)),
])
X1_indiv_b = np.vstack(
[np.ones((5, 50)), -1 * np.ones((10, 50)), np.ones((5, 50))]
)
X1_indiv_tot = np.hstack([X1_indiv_t, X1_indiv_b])
X1_noise = np.random.normal(loc=0, scale=1, size=(20, 100))
# Second View
X2_joint = np.vstack([np.ones((10, 10)), -1 * np.ones((10, 10))])
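# A hedged sketch of how this truncated example would typically continue: the
# second view gets its own individual structure plus noise, and AJIVE is fit on
# both views (the names X2_indiv, X1, and X2 are assumptions):
# X2_noise = np.random.normal(loc=0, scale=1, size=(20, 100))
# X1 = X1_joint + X1_indiv_tot + X1_noise
# X2 = X2_joint + X2_indiv + X2_noise
# ajive = AJIVE(init_signal_ranks=[2, 2])
# ajive.fit(Xs=[X1, X2])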
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
histogra_warping
@author: vik748
"""
import numpy as np
import sys, os
import scipy.stats as st
from scipy.signal import argrelmin, argrelmax
from scipy import interpolate as interp
from scipy import integrate
try:
import cv2
opencv_available = True
except ModuleNotFoundError as e:
print("Opencv Not avialable using 0 order hold for resizing: {}".format(e))
opencv_available = False
try:
from matplotlib import pyplot as plt
matplotlib_available = True
except ImportError as e:
matplotlib_available = False
class HistogramWarpingACE:
def __init__(self, no_bits=8, tau=0.01, lam=5, adjustment_factor=1.0, stretch_factor=1.0,
min_stretch_bits=4, downsample_for_kde=True,debug=False, plot_histograms=False):
self.no_bits = no_bits
self.no_gray_levels = 2 ** self.no_bits
self.x = np.linspace(0,1, self.no_gray_levels)
self.tau = tau
self.lam = lam
self.adjustment_factor = adjustment_factor
self.stretch_factor = stretch_factor
self.min_stretch_bits = min_stretch_bits
self.downsample_for_kde = downsample_for_kde
self.debug = debug
self.plot_histograms = plot_histograms
if self.plot_histograms and not matplotlib_available:
print("matplotlib not available turning off histograms")
self.plot_histograms = False
@staticmethod
def calc_scale_factor(orig_size):
'''
Given a tuple of image size, returns the integer scale factor that
brings the tuple below 1000 pixels
Parameters
----------
orig_size : tuple of ints
Returns
-------
int
'''
orig_size_arr = np.array(orig_size)
scale_found = False
scale_factor = 0
while not scale_found:
scale_factor += 1
new_size = orig_size_arr / scale_factor
if np.max(new_size) < 1000:
remainders = new_size % scale_factor
scale_found = remainders[0] == 0 and remainders[1] == 0
return int(scale_factor)
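# Worked example: calc_scale_factor((4000, 3000)) returns 5: it is the first
# factor for which max(4000/5, 3000/5) = 800 < 1000 and both remainders
# (800 % 5, 600 % 5) are zero.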
@staticmethod
def downsample_image(gr, debug=False, max_pixels=1000):
if np.max(gr.shape) > max_pixels:
sc_fac = HistogramWarpingACE.calc_scale_factor(gr.shape)
if debug: print("Scale factor = ", sc_fac)
if opencv_available:
gr_sc = cv2.resize(gr, (0,0), fx=1/sc_fac, fy=1/sc_fac, interpolation=cv2.INTER_AREA)
else:
gr_sc = gr[::sc_fac,::sc_fac]
else:
gr_sc = gr
return gr_sc
@staticmethod
def gen_F_inverse(F,x_d, delta = 1e-4):
'''
Given a cumulative distribution F sampled at gray values x_d, return an interpolator
mapping F back to gray values (truncating the flat tails below delta and above 1-delta)
'''
zero_indices = np.where(F<delta)[0]
last_zero = zero_indices[-1] if len(zero_indices)>0 else 0
one_indices = np.where(F>(1-delta))[0]
first_one = one_indices[0] if len(one_indices)>0 else len(F)
F_trunc = np.copy(F[last_zero : first_one])
F_trunc[0] = 0
F_trunc[-1] = 1
x_d_trunc = np.copy(x_d[last_zero : first_one])
#for f,x in zip(F_trunc, x_d_trunc):
# print(x,f)
F_interp = interp.interp1d(F_trunc, x_d_trunc)
return F_interp
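# Usage sketch (illustrative values): given a CDF sampled on the gray levels,
# F_inv = HistogramWarpingACE.gen_F_inverse(F, x_d)
# maps a cumulative probability back to a gray value, e.g. F_inv(0.5) gives the
# median gray level.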
@staticmethod
def make_increasing(arr):
indx, = np.where(arr[:-1] > arr[1:])
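# The source breaks off here; a plausible completion, assuming the intent
# implied by the name (flatten any locally decreasing samples) would be:
# for i in indx:
#     arr[i+1] = arr[i]
# return arr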
#! /usr/bin/env python
"""
Bit error rate tester (BERT) simulator, written in Python.
Original Author: <NAME> <<EMAIL>>
Original Date: 17 June 2014
Testing by: <NAME> <<EMAIL>>
This Python script provides a GUI interface to a BERT simulator, which
can be used to explore the concepts of serial communication link design.
Copyright (c) 2014 by <NAME>; All rights reserved World wide.
"""
from traits.etsconfig.api import ETSConfig
# ETSConfig.toolkit = 'qt.celiagg' # Yields unacceptably small font sizes in plot axis labels.
ETSConfig.toolkit = 'qt.qpainter' # Was causing crash on Mac.
from datetime import datetime
import platform
from threading import Event, Thread
from time import sleep
from math import isnan
from cmath import rect, phase
from chaco.api import ArrayPlotData, GridPlotContainer
import numpy as np
from numpy import array, convolve, cos, diff, exp, ones, pad, pi, real, resize, sinc, where, zeros
from numpy.fft import fft, irfft
from numpy.random import randint
from os.path import dirname, join
from scipy.optimize import minimize, minimize_scalar
from traits.api import (
HTML,
Array,
Bool,
Button,
Enum,
File,
Float,
HasTraits,
Instance,
Int,
List,
Property,
Range,
String,
cached_property,
Trait,
)
from traitsui.message import message
import skrf as rf
from pyibisami import __version__ as PyAMI_VERSION
from pyibisami.ami_parse import AMIParamConfigurator
from pyibisami.ami_model import AMIModel
from pyibisami.ibis_file import IBISModel
from pybert import __version__ as VERSION
from pybert import __date__ as DATE
from pybert import __authors__ as AUTHORS
from pybert import __copy__ as COPY
from pybert.pybert_cntrl import my_run_simulation
from pybert.pybert_help import help_str
from pybert.pybert_plot import make_plots
from pybert.pybert_util import (
calc_G,
calc_gamma,
import_channel,
lfsr_bits,
make_ctle,
pulse_center,
safe_log10,
trim_impulse,
submodules,
sdd_21,
H_2_s2p,
interp_time,
cap_mag,
interp_s2p,
renorm_s2p,
)
from pybert.pybert_view import traits_view
gDebugStatus = False
gDebugOptimize = False
gMaxCTLEPeak = 20.0 # max. allowed CTLE peaking (dB) (when optimizing, only)
gMaxCTLEFreq = 20.0 # max. allowed CTLE peak frequency (GHz) (when optimizing, only)
# Default model parameters - Modify these to customize the default simulation.
# - Simulation Control
gBitRate = 10 # (Gbps)
gNbits = 8000 # number of bits to run
gPatLen = 127 # repeating bit pattern length
gNspb = 32 # samples per bit
gNumAve = 1 # Number of bit error samples to average, when sweeping.
# - Channel Control
# - parameters for Howard Johnson's "Metallic Transmission Model"
# - (See "High Speed Signal Propagation", Sec. 3.1.)
# - ToDo: These are the values for 24 guage twisted copper pair; need to add other options.
gRdc = 0.1876 # Ohms/m
gw0 = 10.0e6 # 10 MHz is recommended in Ch. 8 of his second book, in which UTP is described in detail.
gR0 = 1.452  # skin-effect resistance (Ohms/m)
gTheta0 = 0.02 # loss tangent
gZ0 = 100.0 # characteristic impedance in LC region (Ohms)
gv0 = 0.67 # relative propagation velocity (c)
gl_ch = 1.0 # cable length (m)
gRn = 0.001  # standard deviation of Gaussian random noise (V) (Applied at end of channel, so as to appear white to Rx.)
# - Tx
gVod = 1.0 # output drive strength (Vp)
gRs = 100 # differential source impedance (Ohms)
gCout = 0.50 # parasitic output capacitance (pF) (Assumed to exist at both 'P' and 'N' nodes.)
gPnMag = 0.001 # magnitude of periodic noise (V)
gPnFreq = 0.437 # frequency of periodic noise (MHz)
# - Rx
gRin = 100 # differential input resistance
gCin = 0.50 # parasitic input capacitance (pF) (Assumed to exist at both 'P' and 'N' nodes.)
gCac = 1.0 # a.c. coupling capacitance (uF) (Assumed to exist at both 'P' and 'N' nodes.)
gBW = 12.0 # Rx signal path bandwidth, assuming no CTLE action. (GHz)
gUseDfe = False # Include DFE when running simulation.
gDfeIdeal = True # DFE ideal summing node selector
gPeakFreq = 5.0 # CTLE peaking frequency (GHz)
gPeakMag = 10.0 # CTLE peaking magnitude (dB)
gCTLEOffset = 0.0 # CTLE d.c. offset (dB)
# - DFE
gDecisionScaler = 0.5
gNtaps = 5
gGain = 0.5
gNave = 100
gDfeBW = 12.0 # DFE summing node bandwidth (GHz)
# - CDR
gDeltaT = 0.1 # (ps)
gAlpha = 0.01
gNLockAve = 500 # number of UI used to average CDR locked status.
gRelLockTol = 0.1 # relative lock tolerance of CDR.
gLockSustain = 500
# - Analysis
gThresh = 6 # threshold for identifying periodic jitter spectral elements (sigma)
class StoppableThread(Thread):
"""
Thread class with a stop() method.
The thread itself has to check regularly for the stopped() condition.
All PyBERT thread classes are subclasses of this class.
"""
def __init__(self):
super(StoppableThread, self).__init__()
self._stop_event = Event()
def stop(self):
"""Called by thread invoker, when thread should be stopped prematurely."""
self._stop_event.set()
def stopped(self):
"""Should be called by thread (i.e. - subclass) periodically and, if this function
returns True, thread should clean itself up and quit ASAP.
"""
return self._stop_event.is_set()
class TxOptThread(StoppableThread):
"""Used to run Tx tap weight optimization in its own thread,
in order to preserve GUI responsiveness.
"""
def run(self):
"""Run the Tx equalization optimization thread."""
pybert = self.pybert
if self.update_status:
pybert.status = "Optimizing Tx..."
max_iter = pybert.max_iter
old_taps = []
min_vals = []
max_vals = []
for tuner in pybert.tx_tap_tuners:
if tuner.enabled:
old_taps.append(tuner.value)
min_vals.append(tuner.min_val)
max_vals.append(tuner.max_val)
cons = {"type": "ineq", "fun": lambda x: 0.7 - sum(abs(x))}
bounds = list(zip(min_vals, max_vals))
try:
if gDebugOptimize:
res = minimize(
self.do_opt_tx,
old_taps,
bounds=bounds,
constraints=cons,
options={"disp": True, "maxiter": max_iter},
)
else:
res = minimize(
self.do_opt_tx,
old_taps,
bounds=bounds,
constraints=cons,
options={"disp": False, "maxiter": max_iter},
)
if self.update_status:
if res["success"]:
pybert.status = "Optimization succeeded."
else:
pybert.status = "Optimization failed: {}".format(res["message"])
except Exception as err:
pybert.status = err
def do_opt_tx(self, taps):
"""Run the Tx Optimization."""
sleep(0.001) # Give the GUI a chance to acknowledge user clicking the Abort button.
if self.stopped():
raise RuntimeError("Optimization aborted.")
pybert = self.pybert
tuners = pybert.tx_tap_tuners
taps = list(taps)
for tuner in tuners:
if tuner.enabled:
tuner.value = taps.pop(0)
return pybert.cost
class RxOptThread(StoppableThread):
"""Used to run Rx tap weight optimization in its own thread,
in order to preserve GUI responsiveness.
"""
def run(self):
"""Run the Rx equalization optimization thread."""
pybert = self.pybert
pybert.status = "Optimizing Rx..."
max_iter = pybert.max_iter
try:
if gDebugOptimize:
res = minimize_scalar(
self.do_opt_rx,
bounds=(0, gMaxCTLEPeak),
method="Bounded",
options={"disp": True, "maxiter": max_iter},
)
else:
res = minimize_scalar(
self.do_opt_rx,
bounds=(0, gMaxCTLEPeak),
method="Bounded",
options={"disp": False, "maxiter": max_iter},
)
if res["success"]:
pybert.status = "Optimization succeeded."
else:
pybert.status = "Optimization failed: {}".format(res["message"])
except Exception as err:
pybert.status = err
def do_opt_rx(self, peak_mag):
"""Run the Rx Optimization."""
sleep(0.001) # Give the GUI a chance to acknowledge user clicking the Abort button.
if self.stopped():
raise RuntimeError("Optimization aborted.")
pybert = self.pybert
pybert.peak_mag_tune = peak_mag
return pybert.cost
class CoOptThread(StoppableThread):
"""Used to run co-optimization in its own thread, in order to preserve GUI responsiveness."""
def run(self):
"""Run the Tx/Rx equalization co-optimization thread."""
pybert = self.pybert
pybert.status = "Co-optimizing..."
max_iter = pybert.max_iter
try:
if gDebugOptimize:
res = minimize_scalar(
self.do_coopt,
bounds=(0, gMaxCTLEPeak),
method="Bounded",
options={"disp": True, "maxiter": max_iter},
)
else:
res = minimize_scalar(
self.do_coopt,
bounds=(0, gMaxCTLEPeak),
method="Bounded",
options={"disp": False, "maxiter": max_iter},
)
if res["success"]:
pybert.status = "Optimization succeeded."
else:
pybert.status = "Optimization failed: {}".format(res["message"])
except Exception as err:
pybert.status = err
def do_coopt(self, peak_mag):
"""Run the Tx and Rx Co-Optimization."""
sleep(0.001) # Give the GUI a chance to acknowledge user clicking the Abort button.
if self.stopped():
raise RuntimeError("Optimization aborted.")
pybert = self.pybert
pybert.peak_mag_tune = peak_mag
if any([pybert.tx_tap_tuners[i].enabled for i in range(len(pybert.tx_tap_tuners))]):
while pybert.tx_opt_thread and pybert.tx_opt_thread.isAlive():
sleep(0.001)
pybert._do_opt_tx(update_status=False)
while pybert.tx_opt_thread and pybert.tx_opt_thread.isAlive():
sleep(0.001)
return pybert.cost
class TxTapTuner(HasTraits):
"""Object used to populate the rows of the Tx FFE tap tuning table."""
name = String("(noname)")
enabled = Bool(False)
min_val = Float(0.0)
max_val = Float(0.0)
value = Float(0.0)
steps = Int(0) # Non-zero means we want to sweep it.
def __init__(self, name="(noname)", enabled=False, min_val=0.0, max_val=0.0, value=0.0, steps=0):
"""Allows user to define properties, at instantiation."""
# Super-class initialization is ABSOLUTELY NECESSARY, in order
# to get all the Traits/UI machinery setup correctly.
super(TxTapTuner, self).__init__()
self.name = name
self.enabled = enabled
self.min_val = min_val
self.max_val = max_val
self.value = value
self.steps = steps
class PyBERT(HasTraits):
"""
A serial communication link bit error rate tester (BERT) simulator with a GUI interface.
Useful for exploring the concepts of serial communication link design.
"""
# Independent variables
# - Simulation Control
bit_rate = Range(low=0.1, high=120.0, value=gBitRate) #: (Gbps)
nbits = Range(low=1000, high=10000000, value=gNbits) #: Number of bits to simulate.
pattern_len = Range(low=7, high=10000000, value=gPatLen) #: PRBS pattern length.
nspb = Range(low=2, high=256, value=gNspb) #: Signal vector samples per bit.
eye_bits = Int(gNbits // 5) #: # of bits used to form eye. (Default = last 20%)
mod_type = List([0]) #: 0 = NRZ; 1 = Duo-binary; 2 = PAM-4
num_sweeps = Int(1) #: Number of sweeps to run.
sweep_num = Int(1)
sweep_aves = Int(gNumAve)
do_sweep = Bool(False) #: Run sweeps? (Default = False)
debug = Bool(False) #: Send log messages to terminal, as well as console, when True. (Default = False)
# - Channel Control
ch_file = File(
"", entries=5, filter=["*.s4p", "*.S4P", "*.csv", "*.CSV", "*.txt", "*.TXT", "*.*"]
) #: Channel file name.
use_ch_file = Bool(False) #: Import channel description from file? (Default = False)
# padded = Bool(False) #: Zero pad imported Touchstone data? (Default = False)
# windowed = Bool(False) #: Apply windowing to the Touchstone data? (Default = False)
f_step = Float(10) #: Frequency step to use when constructing H(f). (Default = 10 MHz)
impulse_length = Float(0.0) #: Impulse response length. (Determined automatically, when 0.)
Rdc = Float(gRdc) #: Channel d.c. resistance (Ohms/m).
w0 = Float(gw0) #: Channel transition frequency (rads./s).
R0 = Float(gR0) #: Channel skin effect resistance (Ohms/m).
Theta0 = Float(gTheta0) #: Channel loss tangent (unitless).
Z0 = Float(gZ0) #: Channel characteristic impedance, in LC region (Ohms).
v0 = Float(gv0) #: Channel relative propagation velocity (c).
l_ch = Float(gl_ch) #: Channel length (m).
# - EQ Tune
tx_tap_tuners = List(
[
TxTapTuner(name="Pre-tap", enabled=True, min_val=-0.2, max_val=0.2, value=0.0),
TxTapTuner(name="Post-tap1", enabled=False, min_val=-0.4, max_val=0.4, value=0.0),
TxTapTuner(name="Post-tap2", enabled=False, min_val=-0.3, max_val=0.3, value=0.0),
TxTapTuner(name="Post-tap3", enabled=False, min_val=-0.2, max_val=0.2, value=0.0),
]
) #: EQ optimizer list of TxTapTuner objects.
rx_bw_tune = Float(gBW) #: EQ optimizer CTLE bandwidth (GHz).
peak_freq_tune = Float(gPeakFreq) #: EQ optimizer CTLE peaking freq. (GHz).
peak_mag_tune = Float(gPeakMag) #: EQ optimizer CTLE peaking mag. (dB).
ctle_offset_tune = Float(gCTLEOffset) #: EQ optimizer CTLE d.c. offset (dB).
ctle_mode_tune = Enum(
"Off", "Passive", "AGC", "Manual"
) #: EQ optimizer CTLE mode ('Off', 'Passive', 'AGC', 'Manual').
use_dfe_tune = Bool(gUseDfe) #: EQ optimizer DFE select (Bool).
n_taps_tune = Int(gNtaps) #: EQ optimizer # DFE taps.
max_iter = Int(50) #: EQ optimizer max. # of optimization iterations.
tx_opt_thread = Instance(TxOptThread) #: Tx EQ optimization thread.
rx_opt_thread = Instance(RxOptThread) #: Rx EQ optimization thread.
coopt_thread = Instance(CoOptThread) #: EQ co-optimization thread.
# - Tx
vod = Float(gVod) #: Tx differential output voltage (V)
rs = Float(gRs) #: Tx source impedance (Ohms)
cout = Range(low=0.001, high=1000, value=gCout) #: Tx parasitic output capacitance (pF)
pn_mag = Float(gPnMag) #: Periodic noise magnitude (V).
pn_freq = Float(gPnFreq) #: Periodic noise frequency (MHz).
rn = Float(gRn) #: Standard deviation of Gaussian random noise (V).
tx_taps = List(
[
TxTapTuner(name="Pre-tap", enabled=True, min_val=-0.2, max_val=0.2, value=0.0),
TxTapTuner(name="Post-tap1", enabled=False, min_val=-0.4, max_val=0.4, value=0.0),
TxTapTuner(name="Post-tap2", enabled=False, min_val=-0.3, max_val=0.3, value=0.0),
TxTapTuner(name="Post-tap3", enabled=False, min_val=-0.2, max_val=0.2, value=0.0),
]
) #: List of TxTapTuner objects.
rel_power = Float(1.0) #: Tx power dissipation (W).
tx_use_ami = Bool(False) #: (Bool)
tx_has_ts4 = Bool(False) #: (Bool)
tx_use_ts4 = Bool(False) #: (Bool)
tx_use_getwave = Bool(False) #: (Bool)
tx_has_getwave = Bool(False) #: (Bool)
tx_ami_file = File("", entries=5, filter=["*.ami"]) #: (File)
tx_ami_valid = Bool(False) #: (Bool)
tx_dll_file = File("", entries=5, filter=["*.dll", "*.so"]) #: (File)
tx_dll_valid = Bool(False) #: (Bool)
tx_ibis_file = File("", entries=5, filter=["IBIS Models (*.ibs)|*.ibs",]) #: (File)
tx_ibis_valid = Bool(False) #: (Bool)
tx_use_ibis = Bool(False) #: (Bool)
# - Rx
rin = Float(gRin) #: Rx input impedance (Ohm)
cin = Range(low=0, high=1000, value=gCin) #: Rx parasitic input capacitance (pF)
cac = Float(gCac) #: Rx a.c. coupling capacitance (uF)
use_ctle_file = Bool(False) #: For importing CTLE impulse/step response directly.
ctle_file = File("", entries=5, filter=["*.csv"]) #: CTLE response file (when use_ctle_file = True).
rx_bw = Float(gBW) #: CTLE bandwidth (GHz).
peak_freq = Float(gPeakFreq) #: CTLE peaking frequency (GHz)
peak_mag = Float(gPeakMag) #: CTLE peaking magnitude (dB)
ctle_offset = Float(gCTLEOffset) #: CTLE d.c. offset (dB)
ctle_mode = Enum("Off", "Passive", "AGC", "Manual") #: CTLE mode ('Off', 'Passive', 'AGC', 'Manual').
rx_use_ami = Bool(False) #: (Bool)
rx_has_ts4 = Bool(False) #: (Bool)
rx_use_ts4 = Bool(False) #: (Bool)
rx_use_getwave = Bool(False) #: (Bool)
rx_has_getwave = Bool(False) #: (Bool)
rx_ami_file = File("", entries=5, filter=["*.ami"]) #: (File)
rx_ami_valid = Bool(False) #: (Bool)
rx_dll_file = File("", entries=5, filter=["*.dll", "*.so"]) #: (File)
rx_dll_valid = Bool(False) #: (Bool)
rx_ibis_file = File("", entries=5, filter=["*.ibs"]) #: (File)
rx_ibis_valid = Bool(False) #: (Bool)
rx_use_ibis = Bool(False) #: (Bool)
# - DFE
use_dfe = Bool(gUseDfe) #: True = use a DFE (Bool).
sum_ideal = Bool(gDfeIdeal) #: True = use an ideal (i.e. - infinite bandwidth) summing node (Bool).
decision_scaler = Float(gDecisionScaler) #: DFE slicer output voltage (V).
gain = Float(gGain) #: DFE error gain (unitless).
n_ave = Float(gNave) #: DFE # of averages to take, before making tap corrections.
n_taps = Int(gNtaps) #: DFE # of taps.
_old_n_taps = n_taps
sum_bw = Float(gDfeBW) #: DFE summing node bandwidth (Used when sum_ideal=False.) (GHz).
# - CDR
delta_t = Float(gDeltaT) #: CDR proportional branch magnitude (ps).
alpha = Float(gAlpha) #: CDR integral branch magnitude (unitless).
n_lock_ave = Int(gNLockAve) #: CDR # of averages to take in determining lock.
rel_lock_tol = Float(gRelLockTol) #: CDR relative tolerance to use in determining lock.
lock_sustain = Int(gLockSustain) #: CDR hysteresis to use in determining lock.
# - Analysis
thresh = Int(gThresh) #: Threshold for identifying periodic jitter components (sigma).
# Misc.
cfg_file = File("", entries=5, filter=["*.pybert_cfg"]) #: PyBERT configuration data storage file (File).
data_file = File("", entries=5, filter=["*.pybert_data"]) #: PyBERT results data storage file (File).
# Plots (plot containers, actually)
plotdata = ArrayPlotData()
plots_h = Instance(GridPlotContainer)
plots_s = Instance(GridPlotContainer)
plots_p = Instance(GridPlotContainer)
plots_H = Instance(GridPlotContainer)
plots_dfe = Instance(GridPlotContainer)
plots_eye = Instance(GridPlotContainer)
plots_jitter_dist = Instance(GridPlotContainer)
plots_jitter_spec = Instance(GridPlotContainer)
plots_bathtub = Instance(GridPlotContainer)
# Status
status = String("Ready.") #: PyBERT status (String).
jitter_perf = Float(0.0)
total_perf = Float(0.0)
sweep_results = List([])
len_h = Int(0)
chnl_dly = Float(0.0) #: Estimated channel delay (s).
bit_errs = Int(0) #: # of bit errors observed in last run.
run_count = Int(0) # Used as a mechanism to force bit stream regeneration.
# About
perf_info = Property(String, depends_on=["total_perf"])
ident = String(
'<H1>PyBERT v{} - a serial communication link design tool, written in Python.</H1>\n\n \
{}<BR>\n \
{}<BR><BR>\n\n \
{};<BR>\n \
All rights reserved World wide.'.format(
VERSION, AUTHORS, DATE, COPY
)
)
# Help
instructions = help_str
# Console
console_log = String("PyBERT Console Log\n\n")
# Dependent variables
# - Handled by the Traits/UI machinery. (Should only contain "low overhead" variables, which don't freeze the GUI noticeably.)
#
# - Note: Don't make properties, which have a high calculation overhead, dependencies of other properties!
# This will slow the GUI down noticeably.
jitter_info = Property(String, depends_on=["jitter_perf"])
status_str = Property(String, depends_on=["status"])
sweep_info = Property(String, depends_on=["sweep_results"])
tx_h_tune = Property(Array, depends_on=["tx_tap_tuners.value", "nspui"])
ctle_h_tune = Property(
Array,
depends_on=[
"peak_freq_tune",
"peak_mag_tune",
"rx_bw_tune",
"w",
"len_h",
"ctle_mode_tune",
"ctle_offset_tune",
"use_dfe_tune",
"n_taps_tune",
],
)
ctle_out_h_tune = Property(Array, depends_on=["tx_h_tune", "ctle_h_tune", "chnl_h"])
cost = Property(Float, depends_on=["ctle_out_h_tune", "nspui"])
rel_opt = Property(Float, depends_on=["cost"])
t = Property(Array, depends_on=["ui", "nspb", "nbits"])
t_ns = Property(Array, depends_on=["t"])
f = Property(Array, depends_on=["t"])
w = Property(Array, depends_on=["f"])
bits = Property(Array, depends_on=["pattern_len", "nbits", "run_count"])
symbols = Property(Array, depends_on=["bits", "mod_type", "vod"])
ffe = Property(Array, depends_on=["tx_taps.value", "tx_taps.enabled"])
ui = Property(Float, depends_on=["bit_rate", "mod_type"])
nui = Property(Int, depends_on=["nbits", "mod_type"])
nspui = Property(Int, depends_on=["nspb", "mod_type"])
eye_uis = Property(Int, depends_on=["eye_bits", "mod_type"])
dfe_out_p = Array()
przf_err = Property(Float, depends_on=["dfe_out_p"])
# Custom buttons, which we'll use in particular tabs.
# (Globally applicable buttons, such as "Run" and "Ok", are handled more simply, in the View.)
btn_rst_eq = Button(label="ResetEq")
btn_save_eq = Button(label="SaveEq")
btn_opt_tx = Button(label="OptTx")
btn_opt_rx = Button(label="OptRx")
btn_coopt = Button(label="CoOpt")
btn_abort = Button(label="Abort")
btn_cfg_tx = Button(label="Configure") # Configure AMI parameters.
btn_cfg_rx = Button(label="Configure")
btn_sel_tx = Button(label="Select") # Select IBIS model.
btn_sel_rx = Button(label="Select")
btn_view_tx = Button(label="View") # View IBIS model.
btn_view_rx = Button(label="View")
# Logger & Pop-up
def log(self, msg, alert=False, exception=None):
"""Log a message to the console and, optionally, to terminal and/or pop-up dialog."""
_msg = msg.strip()
txt = "\n[{}]: PyBERT: {}\n".format(datetime.now(), _msg)
if self.debug:
## In case PyBERT crashes, before we can read this in its `Console` tab:
print(txt)
self.console_log += txt
if exception:
raise exception
if alert and self.GUI:
message(_msg, "PyBERT Alert")
# Default initialization
def __init__(self, run_simulation=True, gui=True):
"""
Initial plot setup occurs here.
In order to populate the data structure we need to
construct the plots, we must run the simulation.
Args:
run_simulation(Bool): If true, run the simulation, as part
of class initialization. This is provided as an argument
for the sake of larger applications, which may be
importing PyBERT for its attributes and methods, and may
not want to run the full simulation. (Optional;
default = True)
gui(Bool): Set to `False` for script based usage.
"""
# Super-class initialization is ABSOLUTELY NECESSARY, in order
# to get all the Traits/UI machinery setup correctly.
super(PyBERT, self).__init__()
self.GUI = gui
self.log("Started.")
self.log_information()
if self.debug:
self.log("Debug Mode Enabled.")
if run_simulation:
# Running the simulation will fill in the required data structure.
my_run_simulation(self, initial_run=True)
# Once the required data structure is filled in, we can create the plots.
make_plots(self, n_dfe_taps=gNtaps)
else:
self.calc_chnl_h() # Prevents missing attribute error in _get_ctle_out_h_tune().
# Custom button handlers
def _btn_rst_eq_fired(self):
"""Reset the equalization."""
for i in range(4):
self.tx_tap_tuners[i].value = self.tx_taps[i].value
self.tx_tap_tuners[i].enabled = self.tx_taps[i].enabled
self.peak_freq_tune = self.peak_freq
self.peak_mag_tune = self.peak_mag
self.rx_bw_tune = self.rx_bw
self.ctle_mode_tune = self.ctle_mode
self.ctle_offset_tune = self.ctle_offset
self.use_dfe_tune = self.use_dfe
self.n_taps_tune = self.n_taps
def _btn_save_eq_fired(self):
"""Save the equalization."""
for i in range(4):
self.tx_taps[i].value = self.tx_tap_tuners[i].value
self.tx_taps[i].enabled = self.tx_tap_tuners[i].enabled
self.peak_freq = self.peak_freq_tune
self.peak_mag = self.peak_mag_tune
self.rx_bw = self.rx_bw_tune
self.ctle_mode = self.ctle_mode_tune
self.ctle_offset = self.ctle_offset_tune
self.use_dfe = self.use_dfe_tune
self.n_taps = self.n_taps_tune
def _btn_opt_tx_fired(self):
if (
self.tx_opt_thread
and self.tx_opt_thread.isAlive()
or not any([self.tx_tap_tuners[i].enabled for i in range(len(self.tx_tap_tuners))])
):
pass
else:
self._do_opt_tx()
def _do_opt_tx(self, update_status=True):
self.tx_opt_thread = TxOptThread()
self.tx_opt_thread.pybert = self
self.tx_opt_thread.update_status = update_status
self.tx_opt_thread.start()
def _btn_opt_rx_fired(self):
if self.rx_opt_thread and self.rx_opt_thread.isAlive() or self.ctle_mode_tune == "Off":
pass
else:
self.rx_opt_thread = RxOptThread()
self.rx_opt_thread.pybert = self
self.rx_opt_thread.start()
def _btn_coopt_fired(self):
if self.coopt_thread and self.coopt_thread.isAlive():
pass
else:
self.coopt_thread = CoOptThread()
self.coopt_thread.pybert = self
self.coopt_thread.start()
def _btn_abort_fired(self):
if self.coopt_thread and self.coopt_thread.isAlive():
self.coopt_thread.stop()
self.coopt_thread.join(10)
if self.tx_opt_thread and self.tx_opt_thread.isAlive():
self.tx_opt_thread.stop()
self.tx_opt_thread.join(10)
if self.rx_opt_thread and self.rx_opt_thread.isAlive():
self.rx_opt_thread.stop()
self.rx_opt_thread.join(10)
def _btn_cfg_tx_fired(self):
self._tx_cfg()
def _btn_cfg_rx_fired(self):
self._rx_cfg()
def _btn_sel_tx_fired(self):
self._tx_ibis()
if self._tx_ibis.dll_file and self._tx_ibis.ami_file:
self.tx_dll_file = join(self._tx_ibis_dir, self._tx_ibis.dll_file)
self.tx_ami_file = join(self._tx_ibis_dir, self._tx_ibis.ami_file)
else:
self.tx_dll_file = ""
self.tx_ami_file = ""
def _btn_sel_rx_fired(self):
self._rx_ibis()
if self._rx_ibis.dll_file and self._rx_ibis.ami_file:
self.rx_dll_file = join(self._rx_ibis_dir, self._rx_ibis.dll_file)
self.rx_ami_file = join(self._rx_ibis_dir, self._rx_ibis.ami_file)
else:
self.rx_dll_file = ""
self.rx_ami_file = ""
def _btn_view_tx_fired(self):
self._tx_ibis.model()
def _btn_view_rx_fired(self):
self._rx_ibis.model()
# Independent variable setting intercepts
# (Primarily, for debugging.)
def _set_ctle_peak_mag_tune(self, val):
if val > gMaxCTLEPeak or val < 0.0:
raise RuntimeError("CTLE peak magnitude out of range!")
self.peak_mag_tune = val
# Dependent variable definitions
@cached_property
def _get_t(self):
"""
Calculate the system time vector, in seconds.
"""
ui = self.ui
nspui = self.nspui
nui = self.nui
t0 = ui / nspui
npts = nui * nspui
return array([i * t0 for i in range(npts)])
@cached_property
def _get_t_ns(self):
"""
Calculate the system time vector, in ns.
"""
return self.t * 1.0e9
@cached_property
def _get_f(self):
"""
Calculate the frequency vector appropriate for indexing non-shifted FFT output, in Hz.
# (i.e. - [0, f0, 2 * f0, ... , fN] + [-(fN - f0), -(fN - 2 * f0), ... , -f0]
Note: Changed to positive freqs. only, in conjunction w/ irfft() usage.
"""
t = self.t
npts = len(t)
f0 = 1.0 / (t[1] * npts)
half_npts = npts // 2
return array([i * f0 for i in range(half_npts)])
@cached_property
def _get_w(self):
"""
System frequency vector, in rads./sec.
"""
return 2 * pi * self.f
@cached_property
def _get_bits(self):
"""
Generate the bit stream.
"""
pattern_len = self.pattern_len
nbits = self.nbits
mod_type = self.mod_type[0]
bits = []
seed = randint(128)
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_allclose
from app.math_functions import (
build_translation_matrix,
build_scaling_matrix,
build_rotation_matrix,
build_reflection_matrix,
normalize_point,
bezier_blending_functions,
calculate_bezier_points,
calculate_bspline_parameters,
)
from app.config import (
X_MAX_TRANSLATED,
X_MIN_TRANSLATED,
Y_MAX_TRANSLATED,
Y_MIN_TRANSLATED,
MAX_NORMALIZED_VALUE,
MIN_NORMALIZED_VALUE,
)
def test_translation_matrix():
matrix = build_translation_matrix(30, 30, 0)
expected_matrix = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [30, 30, 0, 1]]
)
assert_array_equal(matrix, expected_matrix)
def test_scaling_matrix():
matrix = build_scaling_matrix(30, 30, 0)
expected_matrix = np.array(
[[30, 0, 0, 0], [0, 30, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]
)
assert_array_equal(matrix, expected_matrix)
def test_rotation_matrix():
matrix = build_rotation_matrix(30)
expected_matrix = np.array(
[[0.86602529158, -0.5, 0], [0.5, 0.86602529158, 0], [0, 0, 1]]
)
assert_allclose(matrix, expected_matrix, rtol=1e-5, atol=0)
def test_reflection_matrix_over_x():
matrix = build_reflection_matrix("x")
expected_matrix = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]])
assert_array_equal(matrix, expected_matrix)
def test_reflection_matrix_over_y():
matrix = build_reflection_matrix("y")
expected_matrix = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
assert_array_equal(matrix, expected_matrix)
def test_reflection_matrix_over_origin():
matrix = build_reflection_matrix("origin")
expected_matrix = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
assert_array_equal(matrix, expected_matrix)
@pytest.mark.skip(reason="no way of currently testing this")
def test_point_normalization_zeros():
point = (0, 0)
expected_result = (0, 0)
normalized = normalize_point(point)
assert normalized == expected_result
@pytest.mark.skip(reason="no way of currently testing this")
def test_point_normalization_max():
point = (X_MAX_TRANSLATED, Y_MAX_TRANSLATED)
expected_result = (1, 1)
normalized = normalize_point(point)
assert normalized == expected_result
@pytest.mark.skip(reason="no way of currently testing this")
def test_point_normalization_min():
point = (X_MIN_TRANSLATED, Y_MIN_TRANSLATED)
expected_result = (-1, -1)
normalized = normalize_point(point)
assert normalized == expected_result
@pytest.mark.skip(reason="no way of currently testing this")
def test_point_normalization_over_boundaries():
point = (X_MAX_TRANSLATED * 1000, Y_MAX_TRANSLATED * -1000)
expected_result = (1, -1)
normalized = normalize_point(point)
assert normalized == expected_result
def test_calculate_bspline_parameters():
control_points = [(1, 1), (2, 2), (3, 2), (4, 1)]
delta = 0.1
x, y = calculate_bspline_parameters(control_points, delta)
expected_x = [2, 0.1, 0, 0]
expected_y = [1.833, 0.045, -0.01, 0]
assert_allclose(x, expected_x, rtol=1e-3, atol=1e-3)
assert_allclose(y, expected_y, rtol=1e-3, atol=1e-3)
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 <NAME>, <NAME>, <NAME>
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import sys,os
try:
from collections.abc import Iterable ## Python >= 3.3
except ImportError:
from collections import Iterable ## Python < 3.3
sys.path.append("utils")
import numpy
import logging
logger = logging.getLogger(__name__)
import condor.utils.log
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
import condor.utils.resample
from condor.utils.variation import Variation
from condor.utils.pixelmask import PixelMask
from condor.utils.linalg import length
import condor.utils.testing
import condor.utils.scattering_vector
class Detector:
"""
Class for a photon area-detector
.. image:: images/detector_schematic.jpg
**Arguments:**
:distance (float): Distance from interaction point to detector plane
:pixel_size (float): Edge length of detector pixel (square shape)
**Keyword arguments:**
:cx (float): Horizontal beam position in unit pixel. If ``cx=None`` beam will be positioned in the center (default ``None``)
:cy (float): Vertical beam position in unit pixel If ``cy=None`` beam will be positioned in the center (default ``None``)
:center_variation (str): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:center_spread_x (float): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:center_spread_y (float): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:center_variation_n (int): See :meth:`condor.detector.Detector.set_center_variation` (default ``None``)
:noise (str): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:noise_spread (float): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:noise_filename (str): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:noise_dataset (str): See :meth:`condor.detector.Detector.set_noise` (default ``None``)
:saturation_level (float): Value at which detector pixels saturate (default ``None``)
:binning (int): Pixel binning factor, intensies are integrated over square patches that have an area of ``binning`` x ``binning`` pixels (default ``None``)
:mask_CXI_bitmask (bool): If ``True`` the provided mask (``mask_dataset`` or ``mask``) is a CXI bitmask. For documentation on the implementation of CXI bitmasks see :class:`condor.utils.pixelmask.PixelMask` (default ``False``)
:solid_angle_correction (bool): Whether or not solid angle correction shall be applied (default ``True``)
*Choose one of the following options:*
==================== =============================================================================
``mask_CXI_bitmask`` valid pixels
==================== =============================================================================
``False`` ``1``
``True`` ``(pixels & condor.utils.pixelmask.PixelMask.PIXEL_IS_IN_MASK_DEFAULT) == 0``
==================== =============================================================================
**There are 3 alternative options to specify shape and mask of the detector**
*A) Parameters*
:nx (int): Number of pixels in *x* direction (not including a potential gap or hole) (default ``None``)
:ny (int): Number of pixels in *y* direction (not including a potential gap or hole) (default ``None``)
:x_gap_size_in_pixel (int): Size of central gap along *x* in unit pixel (default ``None``)
:y_gap_size_in_pixel (int): Size of central gap along *y* in unit pixel (default ``None``)
:hole_diameter_in_pixel (int): Diameter of central hole in unit pixel (default ``None``)
*B) HDF5 dataset for mask*
:mask_filename (str): Location of HDF5 file that contains dataset for mask (default ``None``)
:mask_dataset (str): HDF5 dataset (in the file specified by the argument ``mask_filename``) that contains the mask data. Toggle the option ``mask_CXI_bitmask`` for decoding options (default ``None``)
*C) Numpy array for mask*
:mask (array): 2D numpy integer array that defines the mask. Toggle ``mask_CXI_bitmask`` for decoding options (default ``None``)
"""
def __init__(self, distance, pixel_size,
x_gap_size_in_pixel=0, y_gap_size_in_pixel=0, hole_diameter_in_pixel=0, cx_hole=None, cy_hole=None,
noise=None, noise_spread=None, noise_variation_n=None, noise_filename=None, noise_dataset=None,
cx=None, cy=None, center_variation=None, center_spread_x=None, center_spread_y=None, center_variation_n=None,
saturation_level=None, mask=None, mask_filename=None, mask_dataset=None, mask_is_cxi_bitmask=False, solid_angle_correction=True,
nx=None, ny=None, binning=None):
self.distance = distance
self.pixel_size = float(pixel_size)
self._init_mask(mask=mask, mask_is_cxi_bitmask=mask_is_cxi_bitmask, mask_filename=mask_filename, mask_dataset=mask_dataset, nx=nx, ny=ny,
x_gap_size_in_pixel=x_gap_size_in_pixel, y_gap_size_in_pixel=y_gap_size_in_pixel, cx_hole=cx_hole, cy_hole=cy_hole, hole_diameter_in_pixel=hole_diameter_in_pixel)
self.cx_mean = cx if cx != 'middle' else None
self.cy_mean = cy if cy != 'middle' else None
self.set_center_variation(center_variation=center_variation,
center_spread_x=center_spread_x,
center_spread_y=center_spread_y,
center_variation_n=center_variation_n)
self.set_noise(noise=noise,
noise_spread=noise_spread,
noise_variation_n=noise_variation_n,
noise_filename=noise_filename,
noise_dataset=noise_dataset)
self.saturation_level = saturation_level
self.binning = binning
self.solid_angle_correction = solid_angle_correction
def get_conf(self):
"""
Get configuration in form of a dictionary. Another identically configured Detector instance can be initialised by:
.. code-block:: python
conf = D0.get_conf() # D0: already existing Detector instance
D1 = condor.Detector(**conf) # D1: new Detector instance with the same configuration as D0
"""
conf = {}
conf["detector"] = {}
conf["detector"]["distance"] = self.distance
conf["detector"]["pixel_size"] = self.pixel_size
conf["detector"]["cx"] = self.cx_mean
conf["detector"]["cy"] = self.cy_mean
cvar = self._center_variation.get_conf()
conf["detector"]["center_variation"] = cvar["mode"]
conf["detector"]["center_spread_x"] = cvar["spread"][0]
conf["detector"]["center_spread_y"] = cvar["spread"][1]
conf["detector"]["center_variation_n"] = cvar["n"]
noise = self._noise.get_conf()
conf["detector"]["noise"] = noise["mode"]
conf["detector"]["noise_spread"] = noise["spread"]
conf["detector"]["noise_filename"] = self._noise_filename
conf["detector"]["noise_dataset"] = self._noise_dataset
conf["detector"]["saturation_level"] = self.saturation_level
conf["detector"]["mask"] = self._mask.copy()
conf["detector"]["mask_CXI_bitmask"] = True
conf["detector"]["solid_angle_correction"] = self.solid_angle_correction
return conf
def set_noise(self, noise=None, noise_spread=None, noise_variation_n=None, noise_filename=None, noise_dataset=None):
r"""
Set detector noise type and parameters (this method is called during initialisation)
Kwargs:
:noise (str): Noise added to the predicted intensities (default ``None``)
*Choose one of the following options:*
======================= ==================================================================
``noise`` Noise model
======================= ==================================================================
``None`` No noise
``'poisson'`` Poisson noise (*shot noise*)
``'normal'`` Normal (*Gaussian*) noise
``'uniform'`` Uniformly distributed values within spread limits
``'normal_poisson'`` Normal (*Gaussian*) noise on top of Poisson noise (*shot noise*)
``'file'`` Noise data from file
``'file_poisson'`` Noise data from file on top of Poisson noise (*shot noise*)
======================= ==================================================================
:noise_spread (float): Width (full width at half maximum) of the Gaussian or uniform noise distribution (default ``None``)
.. note:: The argument ``noise_spread`` takes only effect in combination with ``noise='normal'``, ``'uniform'`` or ``'normal_poisson'``
:noise_filename (str): Location of the HDF5 file that contains the noise data (default ``None``)
:noise_dataset (str): HDF5 dataset (in the file specified by the argument ``noise_filename``) that contains the noise data (default ``None``)
.. note:: The arguments ``noise_filename`` and ``noise_dataset`` takes effect only in combination with ``noise='file'`` or ``'file_poisson'``
"""
if noise in ["file","file_poisson"]:
self._noise_filename = noise_filename
self._noise_dataset = noise_dataset
self._noise = Variation("poisson" if noise == "file_poisson" else None, noise_spread, noise_variation_n, number_of_dimensions=1)
else:
self._noise_filename = None
self._noise_dataset = None
self._noise = Variation(noise, noise_spread, noise_variation_n, number_of_dimensions=1)
def set_center_variation(self, center_variation=None, center_spread_x=None, center_spread_y=None, center_variation_n=None):
"""
Set the variation of the beam center position (this method is called during initialisation)
Kwargs:
:center_variation(str): Variation of the beam center position (default ``None``)
*Choose one of the following options:*
===================== ==============================================
``center_variation`` Variation model
===================== ==============================================
``None`` No variation
``'normal'`` Normal (*Gaussian*) random distribution
``'uniform'`` Uniform random distribution
``'range'`` Equispaced grid around mean center position
===================== ==============================================
:center_spread_x (float): Width of the distribution of center position in *x* [pixel] (default ``None``)
:center_spread_y (float): Width of the distribution of center position in *y* [pixel] (default ``None``)
.. note:: The arguments ``center_spread_y`` and ``center_spread_x`` take effect only in combination with ``center_variation='normal'``, ``'uniform'`` or ``'range'``
:center_variation_n (int): Number of samples within the specified range (default ``None``)
.. note:: The argument ``center_variation_n`` takes effect only in combination with ``center_variation='range'``
"""
self._center_variation = Variation(center_variation, [center_spread_x,center_spread_y], center_variation_n, number_of_dimensions=2)
def _init_mask(self, mask, mask_is_cxi_bitmask, mask_filename, mask_dataset, nx, ny, x_gap_size_in_pixel, y_gap_size_in_pixel, cx_hole, cy_hole, hole_diameter_in_pixel):
if mask is not None or (mask_filename is not None and mask_dataset is not None):
if mask is not None:
# Copy mask from array
self._mask = numpy.array(mask, dtype=numpy.uint16)
else:
# Read mask from file
import h5py
with h5py.File(mask_filename,"r") as f:
self._mask = numpy.array(f[mask_dataset][:,:], dtype=numpy.uint16)
if not mask_is_cxi_bitmask:
# Convert mask to CXI bit format
self._mask = (self._mask == 0) * PixelMask.PIXEL_IS_MISSING
elif nx is not None and ny is not None:
# Initialise empty mask
self._mask = numpy.zeros(shape=(int(ny+y_gap_size_in_pixel), int(nx+x_gap_size_in_pixel)),dtype=numpy.uint16)
else:
log_and_raise_error(logger, r"Either 'mask' or 'nx' and 'ny' have to be specified.")
sys.exit(1)
self._nx = self._mask.shape[1]
self._ny = self._mask.shape[0]
# Mask out pixels in gaps
if y_gap_size_in_pixel > 0:
cy = int(numpy.ceil((self._ny-1)/2.))
gy = int(numpy.round(y_gap_size_in_pixel))
self._mask[cy-gy//2:cy-gy//2+gy,:] |= PixelMask.PIXEL_IS_MISSING
if x_gap_size_in_pixel > 0:
cx = int(numpy.ceil((self._nx-1)/2.))
gx = int(numpy.round(x_gap_size_in_pixel))
self._mask[:,cx-gx//2:cx-gx//2+gx] |= PixelMask.PIXEL_IS_MISSING
# Mask out pixels in hole
if hole_diameter_in_pixel > 0:
if cx_hole is None:
cx_hole = (self._nx-1)/2.
if cy_hole is None:
cy_hole = (self._ny-1)/2.
Y,X = numpy.indices((self._ny,self._nx), dtype=numpy.float64)
X = X-cx_hole
Y = Y-cy_hole
R = numpy.sqrt(X**2 + Y**2)
tmp = R<=hole_diameter_in_pixel/2.0
if tmp.sum() > 0:
self._mask[tmp] |= PixelMask.PIXEL_IS_MISSING
def get_mask(self,intensities=None, boolmask=False):
"""
Return mask. The mask has information about the status of each individual detector pixel. The output can be either a CXI bitmask (default) or a boolean mask
For further information and the full bitcode go to :class:`condor.utils.pixelmask.PixelMask`
Kwargs:
:intensities: Numpy array of photon intensities for masking saturated pixels (default ``None``)
:boolmask (bool): If ``True`` the output will be a boolean array. Mask values are converted to ``True`` if no bit is set and to ``False`` otherwise
"""
if intensities is not None:
if not condor.utils.testing.same_shape(intensities, self._mask):
log_and_raise_error(logger, "Intensities and mask do not have the same shape")
M = self._mask.copy()
if self.saturation_level is not None and intensities is not None:
M[intensities >= self.saturation_level] |= PixelMask.PIXEL_IS_SATURATED
        if boolmask:
            return numpy.array(M == 0, dtype="bool")
        else:
            return M
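    # Hypothetical usage sketch (names assumed): given a detector `det` and an
    # intensity pattern `I`, a boolean mask that also flags saturated pixels is
    #   valid = det.get_mask(intensities=I, boolmask=True)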
"""
Created on Thu Oct. 10 2019
Recent changes for the version 0.1.1:
1) Insead of giving the input optical penetration depth only give the input
of the complex refractive index "n". This is a material parameter, so
the input is given in the simulation --> add_layer(.) command.
Now "LB" and "TMM" source are initialized almost in the same way
2) One of the Outputs of sim.run() is T. But now we change it to be a
3 dimensional array, with dim0 = time; dim1 = space; dim2 = subsystem
3) The input for the visual class in the v.contour() function should not be
a string but just numbers corresponding to different systems.
@author: <NAME>
<EMAIL>
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from bspline import Bspline
from bspline.splinelab import aptknt
import time
from matplotlib.animation import FuncAnimation as movie
from tqdm import tqdm #Progressbar
#==============================================================================
class temperature(object):
def __init__(self):
self.plt_points = 30 #number of points in x grid
self.length = np.array([0,0]) #length of x space,starting from 0
self.Left_BC_Type = 1 #Boundary conditions Default is Neumann
self.Right_BC_Type = 1 #1=> Neumann; 0=> Dirichlet
self.init = lambda x: 300+0*x # initial temperature of probe
self.n = np.array([1,1],dtype=complex) # Initial refractive index air|...|air
self.conductivity = [1] #This gets deleted after initialisation
self.heatCapacity = [1] #those values are just here to make space
self.rho = [1] #Actual values are given, when 'addLayer(length, conductivity,heatCapacity,rho)' is executed
self.collocpts = 12
        self.setup = False #first-time setup flag so matrices are not computed twice
def getProperties(self): #to depict the properties of the object
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Temperature')
#for every layer, a function to calculate the derivative of k(T)
def diff_conductivity(self,phi,num_of_material):
        eps = 1e-9
dc = (self.conductivity[num_of_material](phi+eps)-self.conductivity[num_of_material](phi))/eps
return(dc)
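    # A minimal numeric sketch of the forward difference above (assumed toy
    # conductivity): with k(T) = 2*T,
    #   dc = (2*(T+eps) - 2*T)/eps = 2  for any T,
    # i.e. diff_conductivity recovers dk/dT up to O(eps).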
    #Creating the key matrices for B-splines. Those are A0, A1, A2.
    #A0 => zeroth derivative; A1 => 1st order derivative, ...
    #We create the matrices for every layer, with the respective length etc.,
    #then we put them together to Abig => boundary and interface conditions are applied there.
def Msetup(self):
        #Deleting the first element of the default initialization.
        #After creating the elements with 'addLayer' we don't need them!
if not self.setup:
self.length = self.length[1:]
self.conductivity = self.conductivity[1:]
self.heatCapacity = self.heatCapacity[1:]
self.rho = self.rho[1:]
self.setup = True
        #Length and number of grid points for each respective layer
length = self.length
plt_points = self.plt_points
num_of_points = self.collocpts #Number of points per layer used in the spline for collocation
order = 5 #order of the spline
x = np.array(np.zeros([np.size(length)-1,num_of_points]))
x_plt = np.array(np.zeros([np.size(length)-1,plt_points]))
knot_vector = np.array(np.zeros([np.size(length)-1,num_of_points+order+1]))
basis = np.array(np.zeros(np.size(length)-1))
A0h = []; A1h = []; A2h = []; Ch = [];
LayerMat = np.array([np.zeros((num_of_points,num_of_points))])
        #Create all the big matrices A0, A1, A2 & C. C is used to map onto a fine mesh in x-space.
#For every layer we set up splines between the boundaries
for i in range(0,np.size(length)-1):
x[i,:] = np.linspace(length[i], length[i+1] , num_of_points)
x_plt[i,:] = np.linspace(length[i], length[i+1] , plt_points)
knot_vector[i,:] = aptknt(x[i,:], order) #prepare for Spline matrix
basis = Bspline(knot_vector[i,:],order)
A0hinter = basis.collmat(x[i,:], deriv_order = 0); A0hinter[-1,-1] = 1
A1hinter = basis.collmat(x[i,:], deriv_order = 1); A1hinter[-1] = -np.flip(A1hinter[0],0)
A2hinter = basis.collmat(x[i,:], deriv_order = 2); A2hinter[-1,-1] = 1
Chinter = basis.collmat(x_plt[i,:], deriv_order = 0); Chinter[-1,-1] = 1
LayerMat = np.append(LayerMat,np.array([np.dot(A2hinter,np.linalg.inv(A0hinter))]),axis = 0)
A0h = np.append(A0h,A0hinter)
A1h = np.append(A1h,A1hinter)
A2h = np.append(A2h,A2hinter)
Ch = np.append(Ch,Chinter)
        #Reshape the long string of appended matrices, such that
        #rows: x-points; columns: i-th basis spline
LayerMat = LayerMat[1:,:,:]
A0h = np.reshape(A0h, (-1,num_of_points))
A1h = np.reshape(A1h, (-1,num_of_points))
A2h = np.reshape(A2h, (-1,num_of_points))
Ch = np.reshape(Ch,(-1,num_of_points))
#Ch => More points in x, but same number of basis splines
#Clearing the interface points, to not double count
N = num_of_points
plp = plt_points
interfaces = np.shape(x)[0]-1
sizeA = np.shape(x)[0]*N-interfaces
sizeCb = np.shape(x)[0]*plp-interfaces
Abig = np.zeros([sizeA,sizeA])
A1b = np.zeros([sizeA,sizeA])
A2b = np.zeros([sizeA,sizeA])
Cb = np.zeros([sizeCb,sizeA])
#Clearing the double counts from the space grid
xflat = x.flatten()
x_plt_flat = x_plt.flatten()
#index of double counts
doublec = np.array([np.arange(1,len(length)-1)])*N
doublec_plt = np.array([np.arange(1,len(length)-1)])*plp
xflat = np.delete(xflat,doublec)
x_plt_flat = np.delete(x_plt_flat,doublec_plt)
#Filling the big matrices.
startA = 0; endA = N-1
startC = 0; endC = plp-1
for i in range(0,interfaces+1):
Abig[startA:endA,startA:endA+1] = A0h[startA+i:endA+i,:]
A1b[startA:endA+1,startA:endA+1] = A1h[startA+i:endA+i+1,:]
A2b[startA:endA+1,startA:endA+1] = A2h[startA+i:endA+i+1,:]
Cb[startC:endC+1,startA:endA+1] = Ch[startC+i:endC+i+1,:]
startA += N-1; endA += N-1
startC += plp-1; endC += plp-1
#Create A00 with no interface condition to correctly compute phi in loop
        #The copy needs to be done before interface conditions are applied in Abig
A00 = Abig.copy()
A00[-1,-1] = 1;
        #Here we make init, conductivity & capacity all functions, in case they are
        # given as integers or floats. Also throw warnings if not every layer has a
        # conductivity or capacity ============================================
        #Making init a function, in case it is given as a scalar
if np.size(self.init) == 1 and isinstance(self.init,(int,float)):
dummy = self.init
self.init = lambda x: dummy + 0*x
if len(length) > 2: #multilayer case
            if len(length)-1 != len(self.heatCapacity) or len(length)-1 != len(self.conductivity):
                print('--------------------------------------------------------')
                print('The number of different layers must match the number of '\
                      'inputs for conductivity, heatCapacity, rho.')
print('--------------------------------------------------------')
            if np.size(self.conductivity) != interfaces+1:
                print('--------------------------------------------------------')
                print('Not every layer has been given a conductivity function. '\
                      'Adjust your input of the conductivity functions with respect to the layers.')
                print('--------------------------------------------------------')
            if np.size(self.heatCapacity) != interfaces+1:
                print('--------------------------------------------------------')
                print('Not every layer has been given a heatCapacity function value. '\
                      'Adjust your input of the heatCapacity functions with respect to the layers.')
print('--------------------------------------------------------')
#Make Functions in case heat capacity/conductivity are given as variables
if (all(self.conductivity) or all(self.heatCapacity) or all(self.init)) == False:
print('No heatCapacity, conductivity or initial function given.')
print('--------------------------------------------------------')
#make the conductivity always a function
if len(length) >2 or np.size(self.conductivity)>=2:
for j in list(range (0,np.size(self.conductivity))):
if isinstance(self.conductivity[j],(int,float,list)) :
dummy3 = self.conductivity[j]
self.conductivity[j] = (lambda b: lambda a: b+0*a)(dummy3)
            #make the heatCapacity always a function
for j in list(range (0,np.size(self.heatCapacity))):
if isinstance(self.heatCapacity[j],(int, float,list)) :
dummy4 = self.heatCapacity[j]
self.heatCapacity[j] = (lambda b: lambda a: b+0*a)(dummy4)
else :
if isinstance(self.conductivity[0],(int,float)):
dummy1 = self.conductivity
self.conductivity = [lambda phi: dummy1 + 0*phi]
if isinstance(self.heatCapacity[0],(int,float)):
dummy2 = self.heatCapacity
self.heatCapacity = lambda phi: dummy2 + 0*phi
self.heatCapacity = [self.heatCapacity]
#End of function creation for init(x), conductivity[l](phi), heatCapacity[l](phi)
# with respect to every layer 'l' =====================================
def interconditions(phi,interfaces):
N = num_of_points
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = self.conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = self.conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
#Initial Electron temperature
initphi = self.init(xflat)
initphi_large = self.init(x_plt_flat)
intercon = interconditions(initphi,interfaces)
        #filling up Abig with the interface conditions in the middle of the grid
start_i = 0; end_i = N-1
for i in range(0,interfaces):
Abig[end_i,start_i:end_i] = intercon[0][i][:-1]#Lhs interface flow
Abig[end_i,end_i+1:end_i+N] = -intercon[1][i][1:]#Rhs interface flow
Abig[end_i,end_i] = intercon[0][i][-1] -intercon[1][i][0]
start_i += N-1; end_i += N-1
Abig[-1,-1] = 1 #to correct Cox algorithm
#Now Matrix Abig is completed and interface condition is applied.
        #Treating 2 types of boundary conditions: 0 => Dirichlet; 1 => Neumann,
        # where the 0th and last rows need to be first order derivatives for the flux.
neumannBL = A1b[0].copy();
neumannBR = A1b[-1].copy();
if self.Left_BC_Type == 1: Abig[0] = -neumannBL
if self.Right_BC_Type == 1: Abig[-1] = neumannBR
#Clear for BC! (first and last row need to be cleared to correctly apply BC)
A1b[0] = 0; A2b[0] = 0;
A1b[-1] = 0; A2b[-1] = 0;
        #Get initial c coefficients for splines using init (=phi_init)
c = np.dot(np.linalg.inv(A00),self.init(xflat))
#Passed on properties to the simulation class
return(c,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h)
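    # Stand-alone sketch of the collocation matrices assembled above (assumed
    # single-layer grid; Bspline/aptknt as imported at the top of this module):
    #   x = np.linspace(0, 1e-9, 12)
    #   basis = Bspline(aptknt(x, 5), 5)
    #   A0 = basis.collmat(x, deriv_order=0)  # maps spline coeffs c -> phi(x)
    #   A2 = basis.collmat(x, deriv_order=2)  # maps c -> d^2 phi / dx^2
    # so that phi = A0 @ c and the Laplacian term in the E.E. loop is A2 @ c.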
def addLayer(self,L,refind,conductivity,heatCapacity,rho):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
self.length = np.append(self.length,self.length[-1]+L)
        #Squeeze in the refractive index between the two layers of air: air|...|air
self.n = np.concatenate((self.n[:-1],[refind],[self.n[-1]]))
self.conductivity.append(conductivity)
self.heatCapacity.append(heatCapacity)
self.rho = np.append(self.rho,rho)
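    # Hypothetical usage sketch (all numbers illustrative, not material data):
    #   temp = temperature()
    #   temp.addLayer(20e-9, 2.9+3.1j, lambda T: 90+0*T, lambda T: 130*T, 8900)
    #   c, A00, Abig, *rest = temp.Msetup()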
#==============================================================================
class simulation(object):
def __init__(self,num_of_temp,source):
        self.temp_data = temperature() #instantiate the temperature object
self.num_of_temp = num_of_temp #1 if only electron temp. 2 if electron and lattice temp.
self.start_time = 0 #starting time (can be negative)
self.final_time = 10 #time when simulation stops
self.time_step = [] #can either be given or is automatically calculated in stability
self.left_BC = 0 #function or constant what the boundary condition
self.right_BC = 0 #on the left or right side of the problem is.
self.stability_lim = [270,3000]
self.temp_data_Lat = [] #Default case is without lattice temperature
self.temp_data_Spin = []
if num_of_temp >= 2: #if Lattice temp is considered
self.temp_data_Lat = temperature() #in case also a lattice module is given
self.coupling = [] #Coupling between Electron and Lattice system
self.left_BC_L = 0 #Setting the default to zero flux
self.right_BC_L = 0 #The BC type is indicated in the temperature class
if num_of_temp == 3: #In case spin coupling is also considered
self.temp_data_Spin = temperature()
self.coupling_LS = [] #Coupling between Lattice and Spin system
self.coupling_SE = [] #Coupling between Electron and Spin system
self.left_BC_S = 0 #Default zero flux Neumann boundary conditions
self.right_BC_S = 0 #On both sides
self.source = source #object source can be passed on
#to depict the properties of the object
def getProperties(self):
for i in (self.__dict__):
name = str(i)
value = str(self.__dict__[i])
print('{:<20}{:>10}'.format( name,value ))
def __repr__(self):
return('Simulation')
def changeInit(self,system,function):
"""
Change the initial condition of every system.
.changeInit(system,function) has 2 input arguments
system --> string "electron" or "lattice" or "spin"
function --> a function handle or a number defining the value of the
system at t=0 over the entire domain x.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
self.temp_data.init = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
self.temp_data_Lat.init = function
if (system == "spin") or (system == "Spin") or (system == 3):
            self.temp_data_Spin.init = function
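    # Hypothetical usage sketch: a Gaussian initial electron-temperature bump
    #   sim.changeInit("electron", lambda x: 300 + 50*np.exp(-(x/50e-9)**2))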
def changeBC_Type(self,system,side,BCType):
"""
Function to change the type of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Type(system,side,BCType) has 3 inputs, all of them are strings.
system --> "electron" or "lattice" or "spin". Altenatively: "1", "2", "3"
side --> "left" or "right"
BCType --> "dirichlet" fixing the value/ "neumann" fixing the flux.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data.Right_BC_Type = 1
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Lat.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Lat.Right_BC_Type = 1
if (system == "spin") or (system == "Spin") or (system == 3):
print("Line 326 Spinsystem")
if side == "left":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Left_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Left_BC_Type = 1
if side == "right":
if (BCType == "dirichlet") or (BCType == 0):
self.temp_data_Spin.Right_BC_Type = 0
if (BCType == "neumann") or (BCType == 1):
self.temp_data_Spin.Right_BC_Type = 1
def changeBC_Value(self,system,side,function):
"""
Function to change the value of the boundary condition on the left and
right side of the material, for every system, "electron", "lattice", "spin"
respectively.
.changeBC_Value(system,side,function) the first two are strings,
the last one is a function handle or a number.
system --> "electron" or "lattice" or "spin"| Altenatively: "1", "2", "3"
side --> "left" or "right"
function--> function or number fixing the value on the boundaries for all times.
"""
if (system == "electron") or (system == "Electron") or (system == 1):
if side == "left":
self.left_BC = function
if side == "right":
self.right_BC = function
if (system == "lattice") or (system == "Lattice") or (system == 2):
if side == "left":
self.left_BC_L = function
if side == "right":
self.right_BC_L = function
if (system == "spin") or (system == "Spin") or (system == 3):
if side == "left":
self.left_BC_S = function
if side == "right":
self.right_BC_S = function
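    # Hypothetical usage sketch combining both BC methods: fix the left edge
    # of the electron system to 300 K for all times
    #   sim.changeBC_Type("electron", "left", "dirichlet")
    #   sim.changeBC_Value("electron", "left", 300)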
def addSubstrate(self,name = "silicon"):
"""
        Automatically appends a silicon substrate using literature
        parameters, mostly taken from:
Contribution of the electron-phonon interaction
to Lindhard energy partition at low energy in Ge and Si
detectors for astroparticle physics applications, by
<NAME> and <NAME>
Note: Refractive index for 400 nm light!
"""
if (name == "Silicon") or (name =="silicon") or (name =="Si"):
k_el_Si = 130#W/(m*K);
k_lat_Si = lambda T: np.piecewise(T,[T<=120.7,T>120.7],\
[lambda T: 100*(0.09*T**3*(0.016*np.exp(-0.05*T)+np.exp(-0.14*T))),
lambda T: 100*(13*1e3*T**(-1.6))])
rho_Si = 2.32e3#kg/(m**3)
C_el_Si = lambda Te: 150/rho_Si *Te
C_lat_Si = 1.6e6/rho_Si
G_Si = 1e17*18#W/(m**3*K)
#Set three layers of Silicon after each other.
#The space resolution on the Film|Substrate edge is high
#and decreases as one moves in bulk direction
if self.num_of_temp == 2:#Lattice only in the 2T
self.temp_data_Lat.addLayer(20e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
self.temp_data_Lat.addLayer(100000e-9,5.5674+0.38612j,k_lat_Si,C_lat_Si,rho_Si)
self.coupling = np.append(self.coupling,G_Si)
#In the 1 and 2 temperature case electron always gets appended
self.temp_data.addLayer(20e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
self.temp_data.addLayer(100000e-9,5.5674+0.38612j,k_el_Si,C_el_Si,rho_Si)
def addLayer(self,L,n,conductivity,heatCapacity,rho,coupling=0,*args):
"""
Add parameters of every layer:
(length,conductivity[electron,lattice,spin],heatCapacity[electron, lattice, spin],density, coupling[E-L,L-S,S-E])
The units in SI are:
[length] = m
[n] = complex refractive index
[conductivity] = W/(mK)
[heatCapacity] = J/(m^3K^2)
[density] = kg/m^3
[Coupling] = W/(m^3K)
"""
        #check all input arguments and make them lists, for the multilayer case
        #make a list when given as an int or float
        if not isinstance(conductivity, (list, np.ndarray)):
            conductivity = [conductivity]
        if not isinstance(heatCapacity, (list, np.ndarray)):
            heatCapacity = [heatCapacity]
#do typecheck only for the lattice system in the 2TM-case
if self.num_of_temp == 2:
            if np.size(conductivity) < 2 or np.size(heatCapacity) < 2:
print('Lattice parameters are missing.\n Add parameters for Lattice system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
            #Only electron-lattice coupling is under consideration here
self.coupling = np.append(self.coupling,coupling)
#do typecheck for the Lattice and the Spin system
if self.num_of_temp == 3:
            if np.size(conductivity) < 3 or np.size(heatCapacity) < 3 or np.size(coupling) < 3:
print('Input parameters are missing.\n Add parameters for '\
'conductivity/heatCapacity or coupling for Lattice/Spin system.')
return(128)
self.temp_data_Lat.addLayer(L,n,conductivity[1],heatCapacity[1],rho)
self.temp_data_Spin.addLayer(L,n,conductivity[2],heatCapacity[2],rho)
            #In the 3TM case the coupling input arg is a vector of len 3. Unwrap it:
self.coupling = np.append(self.coupling,coupling[0])
self.coupling_LS = np.append(self.coupling_LS,coupling[1])
self.coupling_SE = np.append(self.coupling_SE,coupling[2])
#For the electronic system always add the parameters!
self.temp_data.addLayer(L,n,conductivity[0],heatCapacity[0],rho)
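    # Hypothetical usage sketch for a two-temperature (2TM) layer; k_el, k_lat,
    # C_el, C_lat are assumed user-defined functions of temperature:
    #   sim = simulation(2, s)
    #   sim.addLayer(20e-9, 2.9+3.1j, [k_el, k_lat], [C_el, C_lat], 8900, 2e17)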
def interconditions(self,phi,interfaces,conductivity,N,A1h):
"""
A function which gives back an array where the intereface condition is returned
for the left and right side of the interface. Gets called in the E.E.-loop.
"""
end_i = N-1
intercondiL = np.zeros((interfaces,N))
intercondiR = np.zeros((interfaces,N))
for i in range(interfaces):
intercondiL[i] = conductivity[i](phi[end_i])*A1h[end_i+i]
intercondiR[i] = conductivity[i+1](phi[end_i])*A1h[end_i+i+1]
end_i += N-1
return(intercondiL,intercondiR)
def sourceprofile(self,absorptionprofile,timeprofile,xflat,x0,t,N):
        #Consider the Lambert-Beer law in space and different types in time
        if (absorptionprofile == "LB") and (self.source.fluence != 0):
optical_penetration_depth = self.source.ref2delta(self.temp_data.n,self.source.lambda_vac)
if (timeprofile == "Gaussian"):
print('-----------------------------------------------------------')
                print('Lambert-Beer absorption law and a Gaussian time profile are applied as the source.')
print('-----------------------------------------------------------')
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian)
if (timeprofile == "repGaussian") or (timeprofile == "RepGaussian"):
print('-----------------------------------------------------------')
                print('Lambert-Beer absorption profile and a repeated Gaussian time profile are taken into account for the source. '\
                      'The frequency of the pulse repetition has to be indicated via s.frequency = number (in 1/seconds).')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
                    #If num_of_pulses is too big to fit in the timerange [t0,t_end], throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
                    print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
                          'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
[ttime,amplitude] = self.source.loadData
#To extract the custom time profile and the scaling factor
[sourcemat,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,optical_penetration_depth[0])
                #To get the space profile: source with different optical penetration depths defined on the xflat grid
sourceM = self.source.init_G_source(xflat,x0,t,optical_penetration_depth,N,self.source.Gaussian,customtime,scaling)
#Consider Transfer Matrix in space and different types in time
if (absorptionprofile == "TMM") and (self.source.fluence is not 0):
"""
This will implement a transfer matrix approach to local absorption
instead as using the Lambert Beer´s law considered in the Gaussian
source type.
"""
            #Multiplying by 1e9, since the absorption() function in the source module only works if lengths are given in nm!
            x0m = x0*1e9 #convert the length into nm
            if len(x0) != (len(self.temp_data.n)-1):
print('-----------------------------------------------------------')
                print('Number of considered layers does not match the given refractive indices.\n'\
                      'In \'temperature.n(Air|Film layer1|Film layer2|...|Air)\' only consider the film layers.\n'\
                      'The refractive index of the substrate gets added automatically later when \n'\
                      '`simulation.addSubstrate(\'name\')` gets called.')
print('-----------------------------------------------------------')
if (timeprofile == "Gaussian"):
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m)
print('-----------------------------------------------------------')
print('Transfer matrix absorption profile and a Gaussian time profile is taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if (timeprofile == "custom") or (timeprofile == "Custom"):
print('-----------------------------------------------------------')
                print('Transfer matrix absorption profile and a custom time profile are taken into account for the source.\n'\
'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
if self.source.loadData is False:
print('-----------------------------------------------------------')
print('Import an array, containing the data of the custom pulse.'\
'arr[0,:] = time; arr[1,:] = amplitude')
print('-----------------------------------------------------------')
[ttime,amplitude] = self.source.loadData
                lam = 1 #Lambda does not matter here since the spatial absorption is calculated via TMM
[sourceM,customtime,scaling] = self.source.custom(t,xflat,ttime,amplitude,lam)
                #The createTMM(xgrid,timegrid,length,*args) has customtime as an optional argument
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime,scaling)
if (timeprofile == "RepGaussian") or (timeprofile== "repGaussian"):
print('-----------------------------------------------------------')
                print('Transfer matrix absorption profile and a repeated Gaussian time profile are taken into account for the source. '\
                      'Length of every layer has to be given in units of meter.')
print('-----------------------------------------------------------')
self.source.multipulse = True
xmg, tmg = np.meshgrid(xflat,t)
if (self.source.frequency is not False):
time_range = tmg[-1,-1]-self.source.t0
pulses = int(round(time_range * self.source.frequency))
#Add up Gaussian pulses with different t0, according to the frequency given
#from t0 onwards, until the end of the time grid
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 + i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is not False) and (self.source.num_of_pulses is not False):
#Creating a certain number of pulses according to self.num_of_pulses
time_range = tmg[-1,-1]-self.source.t0
pulses = self.source.num_of_pulses
                    #If num_of_pulses is too big to fit in the timerange [t0,t_end], throw a warning
if (pulses > int(round(time_range * self.source.frequency))):
pulses = int(round(time_range * self.source.frequency))
print('Number of pulses is too big to fit in the timerange under consideration. \n'\
'Adjust t_end or consider a smaller number of pulses.')
customtime = np.zeros(np.shape(tmg))
for i in range(0,pulses):
t00 = self.source.t0 +i/self.source.frequency
customtime +=np.exp(-(tmg-t00)**2*np.log(2)/(self.source.FWHM**2))
sourceM = self.source.createTMM(self.temp_data.n,xflat,t,x0m,customtime)
if(self.source.frequency is False) and (self.source.num_of_pulses is False):
print('-----------------------------------------------------------')
                    print('Assign the property s.frequency to consider a certain pulse frequency.\n'\
                          'If only a certain number of pulses should be considered, assign the value s.num_of_pulses = integer.')
print('-----------------------------------------------------------')
return(sourceM)
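    # Stand-alone sketch of the repeated-Gaussian envelope built above (toy
    # numbers assumed), mirroring the expression used in the loops:
    #   t = np.linspace(0, 1e-9, 1000); FWHM = 50e-15; t0 = 1e-12; f = 1e10
    #   pulses = int(round((t[-1]-t0)*f))
    #   envelope = sum(np.exp(-(t-(t0+i/f))**2*np.log(2)/FWHM**2)
    #                  for i in range(pulses))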
# This is the main Explicit Euler loop where the solution to T(x,t) is calculated.
def run(self):
idealtimestep = self.stability()
if not self.time_step:
self.time_step = idealtimestep
print('-----------------------------------------------------------')
            print(' No specific time step has been indicated. \n '\
                  'The stability region has been calculated and an appropriate timestep has been chosen.\n '\
                  'Timestep = {idealtimestep:.2e} s'.format(idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if (self.time_step-idealtimestep)/idealtimestep > 0.1:
print('-----------------------------------------------------------')
            print('The manually chosen time step of {time_step:.2e} s may be too big and could cause instabilities in the simulation.\n '\
                  'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
if(self.time_step-idealtimestep)/idealtimestep < -0.2:
print('-----------------------------------------------------------')
            print('The manually chosen time step of {time_step:.2e} s is very small and may cause a long simulation time.\n'\
                  'We suggest a timestep of {idealtimestep:.2e} s'.format(time_step=self.time_step,idealtimestep=idealtimestep))
print('-----------------------------------------------------------')
        #loading simulation-relevant properties from the underlying temperature object
[c_E,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large,interfaces,LayerMat,A1h] = self.temp_data.Msetup()
t = np.arange(self.start_time,self.final_time,self.time_step)
        #only if the injection would make the time grid finer, to not move into the unstable regime
if self.source.FWHM:
if (6*self.source.FWHM/200 < idealtimestep):
#inject 200 extra points around pulse to fully capture the shape of the pulse
tinj = np.linspace(self.source.t0 - 3*self.source.FWHM,self.source.t0 + 3*self.source.FWHM,200)
smaller = np.where(t<self.source.t0 - 3*self.source.FWHM)[0]
bigger = np.where(t>self.source.t0 + 3*self.source.FWHM)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
        #If a more refined grid is chosen around t0, we inject a fine time grid around t0 to correctly capture the pulse shape
if self.source.adjusted_grid is not False:
if self.source.dt0 == False:
print('-----------------------------------------------------------')
                print('The option for an adjusted grid is True, but no interval for a more refined grid has been given. '\
                      'Indicate dt0 (around which value the time grid should have higher resolution) in the source object.')
print('-----------------------------------------------------------')
if 2*self.source.dt0/self.source.extra_points < idealtimestep:
print('-----------------------------------------------------------')
print('A refined Grid around t0 has been applied')
print('-----------------------------------------------------------')
tinj = np.linspace(self.source.t0-self.source.dt0,self.source.t0+self.source.dt0,self.source.extra_points)
smaller = np.where(t<self.source.t0 - self.source.dt0)[0]
bigger = np.where(t>self.source.t0 + self.source.dt0)[0]
#new time grid with higher resolution
t = np.concatenate((t[smaller],tinj,t[bigger]),axis=0)
tstep = np.ones(len(t))
tstep[:-1] = np.diff(t); tstep[-1] = np.diff(t)[-1]
else:
print('-----------------------------------------------------------')
                print('No refined time grid is applied. The timestep is already very small. ' \
                      'You can use the simulation class property self.time_step and '\
                      'assign a smaller value than the current time step.')
print('-----------------------------------------------------------')
#Initialize the systems and load the matrices
if self.temp_data_Lat:
            if self.temp_data.plt_points != self.temp_data_Lat.plt_points:
                self.temp_data_Lat.plt_points = self.temp_data.plt_points
                print('-----------------------------------------------------------')
                print('The number of plotting points in the electron system \n'\
                      'is not the same as in the lattice system.\n'\
                      'They are set equal to avoid a matrix dimension mismatch.')
                print('-----------------------------------------------------------')
            if self.temp_data.collocpts != self.temp_data_Lat.collocpts:
                self.temp_data_Lat.collocpts = self.temp_data.collocpts
                print('-----------------------------------------------------------')
                print('The number of collocation points in the electron system \n'\
                      'is not the same as in the lattice system.\n'\
                      'They are set equal to avoid a matrix dimension mismatch.')
                print('-----------------------------------------------------------')
[c_L,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_L,interfaces,LayerMat,A1h] = self.temp_data_Lat.Msetup()
if self.temp_data_Spin:
print("Line 728 Spinsystem")
            if self.temp_data.plt_points != self.temp_data_Spin.plt_points:
                self.temp_data_Spin.plt_points = self.temp_data.plt_points
                print('-----------------------------------------------------------')
                print('The number of plotting points in the electron system \n'\
                      'is not the same as in the spin system.\n'\
                      'They are set equal to avoid a matrix dimension mismatch.')
                print('-----------------------------------------------------------')
            if self.temp_data.collocpts != self.temp_data_Spin.collocpts:
                self.temp_data_Spin.collocpts = self.temp_data.collocpts
                print('-----------------------------------------------------------')
                print('The number of collocation points in the electron system \n'\
                      'is not the same as in the spin system.\n'\
                      'They are set equal to avoid a matrix dimension mismatch.')
                print('-----------------------------------------------------------')
[c_S,A00,Abig,A1b,A2b,Cb,length,N,plp,xflat,x_plt_flat,initphi_large_S,interfaces,LayerMat,A1h] = self.temp_data_Spin.Msetup()
if (self.source.fluence == 0):
print('-----------------------------------------------------------')
print('No source is applied.\n'\
'source.fluence = 0')
print('-----------------------------------------------------------')
xmg, tmg = np.meshgrid(xflat,t)
sourceM = np.zeros_like(xmg)
else:
sourceM = self.sourceprofile(self.source.spaceprofile,self.source.timeprofile,xflat,self.temp_data.length,t,N)
#Making the boundary conditions a function of t, in case they are given as scalars
if isinstance(self.left_BC,(int,float)):
dummy = self.left_BC
self.left_BC = lambda t: dummy + 0*t
if isinstance(self.right_BC,(int,float)):
dummy1 = self.right_BC
self.right_BC = lambda t: dummy1 + 0*t
        #Making the boundary conditions a matrix for the electron case
BC_E = np.zeros((len(c_E),len(t)))
BC_E[0] = self.left_BC(t)
BC_E[-1] = self.right_BC(t)
#Checking the Lattice system boundary conditions
if self.temp_data_Lat:
if isinstance(self.left_BC_L,(int,float)):
dummy2 = self.left_BC_L
self.left_BC_L = lambda t: dummy2 + 0*t
if isinstance(self.right_BC_L,(int,float)):
dummy3 = self.right_BC_L
self.right_BC_L = lambda t: dummy3 + 0*t
            #Making the boundary conditions a matrix for the lattice case
BC_L = np.zeros((len(c_L),len(t)))
BC_L[0] = self.left_BC_L(t)
BC_L[-1] = self.right_BC_L(t)
        #Checking the Spin system boundary conditions
        #This implies that all three temperature systems are considered under this branch
if self.temp_data_Spin:
if isinstance(self.left_BC_S,(int,float)):
dummy4 = self.left_BC_S
self.left_BC_S = lambda t: dummy4 + 0*t
if isinstance(self.right_BC_S,(int,float)):
dummy5 = self.right_BC_S
self.right_BC_S = lambda t: dummy5 + 0*t
            #Making the boundary conditions a matrix for the spin case
BC_S = np.zeros((len(c_S),len(t)))
BC_S[0] = self.left_BC_S(t)
BC_S[-1] = self.right_BC_S(t)
#Check if the Lattice/Spin and Spin/Electron coupling constants have the right size
if np.size(self.coupling_LS)<np.size(length)-1:
self.coupling_LS = self.coupling_LS*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique Lattice-Spin coupling constant \'G_LS\'.\n'\
                  '=> G_LS will be set to the value of the first layer = {coupling_LS[0]:.2e}\n for all other layers.'.format(coupling_LS=self.coupling_LS))
print('-----------------------------------------------------------')
if np.size(self.coupling_SE)<np.size(length)-1:
self.coupling_SE = self.coupling_SE*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique Spin-Electron coupling constant \'G_SE\'.\n'\
                  '=> G_SE will be set to the value of the first layer = {coupling_SE[0]:.2e}\n for all other layers.'.format(coupling_SE=self.coupling_SE))
print('-----------------------------------------------------------')
        #If only the two-temperature model is considered we only need to check one coupling constant
if np.size(self.coupling)<np.size(length)-1:
self.coupling = self.coupling*np.ones(np.size(self.temp_data.length)-1)
print('-----------------------------------------------------------')
            print('Not every layer has a unique coupling constant \'G\'.\n'\
                  '=> G will be set to the value of the first layer = {coupling[0]:.2e}\n for all other layers.'.format(coupling=self.coupling))
print('-----------------------------------------------------------')
# The 3 Temperature Case is being considered
if self.temp_data_Spin:
#Setup arrays for electron temperature
phi_E = np.zeros((len(t),len(x_plt_flat))); phi_E[0] = initphi_large
Flow_1E = np.zeros(len(c_E))
Flow_2E = np.zeros(len(c_E))
dphi_E = np.zeros(len(c_E))
intphi_E = np.zeros(len(c_E))
#Setup arrays for lattice temperature
phi_L = np.zeros((len(t),len(x_plt_flat))); phi_L[0] = initphi_large_L #300*np.ones(len(phi_L[0]))
Flow_1L = np.zeros(len(c_L))
Flow_2L = np.zeros(len(c_L))
dphi_L = np.zeros(len(c_L))
intphi_L = np.zeros(len(c_L))
#Setup arrays for the spin temperature
phi_S = np.zeros((len(t),len(x_plt_flat))); phi_S[0] = initphi_large_S #300*np.ones(len(phi_L[0]))
Flow_1S = np.zeros(len(c_S))
Flow_2S = np.zeros(len(c_S))
dphi_S = np.zeros(len(c_S))
intphi_S = np.zeros(len(c_S))
#General setup for E.E. loop
            condi = np.array([np.arange(1,len(length)-1)])*(N-1) #index to apply the interface condition
            cnfill = np.array([np.arange(1,len(length)-1)])*(plp-1) #correct the interface condition with the real value for phi
            A00[0] = 1; A00[-1] = 1 #avoid division by 0 in dphi_L! Clear for BC before the intphi calculation.
            Abig_E = np.copy(Abig)
"""
Module for performing Promiscuity Index
calculations for the Hetaira web tool.
"""
import numpy as np
from .util import process_data
from string import ascii_lowercase
def calculate_results(file):
"""
Working function to be called by main view function.
"""
data = process_data(file)
promiscuity = Promiscuity(data[0], data[1], data[2])
return promiscuity.hetaira_results()
class Promiscuity:
"""
    A class to compute and return Promiscuity Indices. Included are
    methods for calculating both the unweighted Promiscuity Index (I)
    and J, the Promiscuity Index weighted by dissimilarity.
    Also available is the overall set dissimilarity.
"""
def __init__(self, items, data, descriptors=None, min = 1e-6):
self.items = items
# min is the presumed lower bound of the functional unit
self.data = np.asarray(data) + min
self.descriptors = descriptors
if self.descriptors is not None:
self.d_length = len(descriptors)
self.dset = self.dset()
self.avg_dists = self.avg_dists()
else:
self.dset = 'not determined'
def jaccard(self, u, v):
"""
Computes the Jaccard distance between two boolean 1-D arrays.
"""
dist = np.dot(u, v) / np.double(np.bitwise_or(u, v).sum())
return 1 - dist
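    # Worked example of the Jaccard distance above (assumed boolean vectors):
    #   u = np.array([1, 1, 0, 1]); v = np.array([1, 0, 0, 1])
    #   np.dot(u, v) = 2 (intersection); np.bitwise_or(u, v).sum() = 3 (union)
    #   distance = 1 - 2/3 = 0.333...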
def avg_dists(self):
"""
Computes the average Jaccard distance between each 1-D
boolean array and all the others in the set.
"""
d = self.descriptors
# make an empty array to fill b/c it is a touch faster
averages = np.empty([1, self.d_length])
for i, u in enumerate(d):
s = 0
for j, v in enumerate(d):
if i != j:
s += self.jaccard(u, v)
averages[0, i] = (s / (self.d_length-1))
return averages[0]
def ivalue(self, idx):
"""
        Calculates the unweighted Promiscuity Index.
The data should be strictly > 0 and more positive is 'better'.
"""
a = self.data[:,idx] / self.data[:,idx].sum()
        results = -(np.dot(a, np.log(a)))
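        # Hedged note: the common definition of the unweighted promiscuity
        # index normalizes the entropy above by ln(N), i.e.
        #   I = -(sum_i a_i * ln(a_i)) / ln(N)
        # which here would correspond to `results / np.log(len(a))`; that
        # normalization is an assumption, not a verified continuation.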
"""Tests for the mask.py script."""
import pytest
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import unittest.mock as mock
from deltametrics import cube
from deltametrics import mask
from deltametrics.plan import OpeningAnglePlanform
from deltametrics.sample_data import _get_rcm8_path, _get_golf_path
rcm8_path = _get_rcm8_path()
with pytest.warns(UserWarning):
rcm8cube = cube.DataCube(rcm8_path)
golf_path = _get_golf_path()
golfcube = cube.DataCube(golf_path)
_OAP_0 = OpeningAnglePlanform.from_elevation_data(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
_OAP_05 = OpeningAnglePlanform.from_elevation_data(
golfcube['eta'][-1, :, :],
elevation_threshold=0.5)
@mock.patch.multiple(mask.BaseMask,
__abstractmethods__=set())
class TestBaseMask:
"""
To test the BaseMask, we patch the base job with a filled abstract method
`.run()`.
.. note:: This patch is handled at the class level above!!
"""
fake_input = np.ones((100, 200))
@mock.patch('deltametrics.mask.BaseMask._set_shape_mask')
def test_name_setter(self, patched):
basemask = mask.BaseMask('somename', self.fake_input)
assert basemask.mask_type == 'somename'
patched.assert_called() # this would change the shape
assert basemask.shape is None # so shape is not set
assert basemask._mask is None # so mask is not set
def test_simple_example(self):
basemask = mask.BaseMask('field', self.fake_input)
# make a bunch of assertions
assert np.all(basemask._mask == False)
assert np.all(basemask.integer_mask == 0)
assert basemask._mask is basemask.mask
assert basemask.shape == self.fake_input.shape
def test_trim_mask_length(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
_l = 5
basemask.trim_mask(length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 0)
assert np.all(basemask.integer_mask[_l:, :] == 1)
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_trim_mask_cube(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
basemask.trim_mask(golfcube)
# assert np.all(basemask.integer_mask[:5, :] == 0)
# assert np.all(basemask.integer_mask[5:, :] == 1)
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_trim_mask_noargs(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
basemask.trim_mask()
# assert np.all(basemask.integer_mask[:5, :] == 0)
# assert np.all(basemask.integer_mask[5:, :] == 1)
def test_trim_mask_axis1_withlength(self):
basemask = mask.BaseMask('field', self.fake_input)
# mock as though the mask were made
basemask._mask = self.fake_input.astype(bool)
assert np.all(basemask.integer_mask == 1)
_l = 5
basemask.trim_mask(axis=0, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:, :_l] == 0)
assert np.all(basemask.integer_mask[:, _l:] == 1)
def test_trim_mask_diff_True(self):
basemask = mask.BaseMask('field', self.fake_input)
# everything is False (0)
assert np.all(basemask.integer_mask == 0)
_l = 5
basemask.trim_mask(value=True, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 1)
assert np.all(basemask.integer_mask[_l:, :] == 0)
def test_trim_mask_diff_ints(self):
basemask = mask.BaseMask('field', self.fake_input)
# everything is False (0)
assert np.all(basemask.integer_mask == 0)
_l = 5
basemask.trim_mask(value=1, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 1)
basemask.trim_mask(value=0, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 0)
basemask.trim_mask(value=5, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 1)
basemask.trim_mask(value=5.534, length=_l)
assert basemask._mask.dtype == bool
assert np.all(basemask.integer_mask[:_l, :] == 1)
def test_trim_mask_toomanyargs(self):
basemask = mask.BaseMask('field', self.fake_input)
with pytest.raises(ValueError):
basemask.trim_mask('arg1', 'arg2', value=1, length=1)
def test_show(self):
"""
Here, we just test whether it works, and whether it takes a
specific axis.
"""
basemask = mask.BaseMask('field', self.fake_input)
# test show with nothing
basemask.show()
plt.close()
# test show with colorbar
basemask.show(colorbar=True)
plt.close()
# test show with title
basemask.show(title='a title')
plt.close()
# test show with axes, bad values
fig, ax = plt.subplots()
basemask.show(ax=ax)
plt.close()
def test_show_error_nomask(self):
"""
Here, we just test whether it works, and whether it takes a
specific axis.
"""
basemask = mask.BaseMask('field', self.fake_input)
# mock as though something went wrong
basemask._mask = None
with pytest.raises(RuntimeError):
basemask.show()
def test_no_data(self):
"""Test when no data input raises error."""
with pytest.raises(ValueError, match=r'Expected 1 input, got 0.'):
_ = mask.BaseMask('field')
def test_invalid_data(self):
"""Test invalid data input."""
with pytest.raises(TypeError, match=r'Unexpected type was input: .*'):
_ = mask.BaseMask('field', 'a string!!')
def test_invalid_second_data(self):
"""Test invalid data input."""
with pytest.raises(TypeError, match=r'First input to mask .*'):
_ = mask.BaseMask('field', np.zeros((100, 200)), 'a string!!')
def test_return_empty(self):
"""Test when no data input, but allow empty, returns empty."""
empty_basemask = mask.BaseMask('field', allow_empty=True)
assert empty_basemask.mask_type == 'field'
assert empty_basemask.shape is None
assert empty_basemask._mask is None
assert empty_basemask._mask is empty_basemask.mask
def test_is_mask_deprecationwarning(self):
"""Test that TypeError is raised if is_mask is invalid."""
with pytest.warns(DeprecationWarning):
_ = mask.BaseMask('field', self.fake_input,
is_mask='invalid')
with pytest.warns(DeprecationWarning):
_ = mask.BaseMask('field', self.fake_input,
is_mask=True)
def test_3dinput_deprecationerror(self):
"""Test that TypeError is raised if is_mask is invalid."""
with pytest.raises(ValueError, match=r'Creating a `Mask` .*'):
_ = mask.BaseMask('field', np.random.uniform(size=(10, 100, 200)))
class TestShorelineMask:
"""Tests associated with the mask.ShorelineMask class."""
# define an input mask for the mask instantiation pathway
_ElevationMask = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
def test_default_vals_array(self):
"""Test that instantiation works for an array."""
# define the mask
shoremask = mask.ShorelineMask(
rcm8cube['eta'][-1, :, :],
elevation_threshold=0)
# make assertions
assert shoremask._input_flag == 'array'
assert shoremask.mask_type == 'shoreline'
assert shoremask.angle_threshold > 0
assert shoremask._mask.dtype == bool
assert isinstance(shoremask._mask, np.ndarray)
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_default_vals_cube(self):
"""Test that instantiation works for an array."""
# define the mask
shoremask = mask.ShorelineMask(rcm8cube, t=-1)
# make assertions
assert shoremask._input_flag == 'cube'
assert shoremask.mask_type == 'shoreline'
assert shoremask.angle_threshold > 0
assert shoremask._mask.dtype == bool
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_default_vals_cubewithmeta(self):
"""Test that instantiation works for an array."""
# define the mask
shoremask = mask.ShorelineMask(golfcube, t=-1)
# make assertions
assert shoremask._input_flag == 'cube'
assert shoremask.mask_type == 'shoreline'
assert shoremask.angle_threshold > 0
assert shoremask._mask.dtype == bool
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_default_vals_mask(self):
"""Test that instantiation works for an array."""
# define the mask
shoremask = mask.ShorelineMask(self._ElevationMask)
# make assertions
assert shoremask._input_flag == 'mask'
assert shoremask.mask_type == 'shoreline'
assert shoremask.angle_threshold > 0
assert shoremask._mask.dtype == bool
def test_angle_threshold(self):
"""Test that instantiation works for an array."""
# define the mask
shoremask_default = mask.ShorelineMask(
rcm8cube['eta'][-1, :, :],
elevation_threshold=0)
shoremask = mask.ShorelineMask(
rcm8cube['eta'][-1, :, :],
elevation_threshold=0,
angle_threshold=45)
# make assertions
assert shoremask.angle_threshold == 45
assert not np.all(shoremask_default == shoremask)
def test_submergedLand(self):
"""Check what happens when there is no land above water."""
# define the mask
shoremask = mask.ShorelineMask(
rcm8cube['eta'][0, :, :],
elevation_threshold=0)
        # assert - expect all True values to be in one row
_whr_edge = np.where(shoremask._mask[:, 0])
assert _whr_edge[0].size > 0 # if fails, no shoreline found!
_row = int(_whr_edge[0][0])
assert np.all(shoremask._mask[_row, :] == 1)
assert np.all(shoremask._mask[_row+1:, :] == 0)
def test_static_from_OAP(self):
shoremask = mask.ShorelineMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
mfOAP = mask.ShorelineMask.from_OAP(_OAP_0)
shoremask_05 = mask.ShorelineMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0.5)
mfOAP_05 = mask.ShorelineMask.from_OAP(_OAP_05)
assert np.all(shoremask._mask == mfOAP._mask)
assert np.all(shoremask_05._mask == mfOAP_05._mask)
def test_static_from_mask_ElevationMask(self):
shoremask = mask.ShorelineMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
mfem = mask.ShorelineMask.from_mask(self._ElevationMask)
shoremask_05 = mask.ShorelineMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0.5)
assert np.all(shoremask._mask == mfem._mask)
assert np.sum(shoremask_05.integer_mask) < np.sum(shoremask.integer_mask)
def test_static_from_array(self):
"""Test that instantiation works for an array."""
# define the mask
_arr = np.ones((100, 200))
_arr[50:55, :] = 0
shoremask = mask.ShorelineMask.from_array(_arr)
# make assertions
assert shoremask.mask_type == 'shoreline'
assert shoremask._input_flag is None
assert np.all(shoremask._mask == _arr)
_arr2 = np.random.uniform(size=(100, 200))
_arr2_bool = _arr2.astype(bool)
assert _arr2.dtype == float
shoremask2 = mask.ShorelineMask.from_array(_arr2)
# make assertions
assert shoremask2.mask_type == 'shoreline'
assert shoremask2._input_flag is None
assert np.all(shoremask2._mask == _arr2_bool)
class TestElevationMask:
"""Tests associated with the mask.LandMask class."""
def test_default_vals_array(self):
"""Test that instantiation works for an array."""
# define the mask
elevationmask = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
# make assertions
assert elevationmask._input_flag == 'array'
assert elevationmask.mask_type == 'elevation'
assert elevationmask.elevation_threshold == 0
assert elevationmask.threshold == 0
assert elevationmask.elevation_threshold is elevationmask.threshold
assert elevationmask._mask.dtype == bool
def test_all_below_threshold(self):
elevationmask = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=10)
# make assertions
assert elevationmask._input_flag == 'array'
assert elevationmask.mask_type == 'elevation'
assert elevationmask.elevation_threshold == 10
assert elevationmask.threshold == 10
assert elevationmask.elevation_threshold is elevationmask.threshold
assert elevationmask._mask.dtype == bool
assert np.all(elevationmask.mask == 0)
def test_all_above_threshold(self):
elevationmask = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=-10)
# make assertions
assert elevationmask._input_flag == 'array'
assert elevationmask.mask_type == 'elevation'
assert elevationmask.elevation_threshold == -10
assert elevationmask.threshold == -10
assert elevationmask.elevation_threshold is elevationmask.threshold
assert elevationmask._mask.dtype == bool
assert np.all(elevationmask.mask == 1)
def test_default_vals_array_needs_elevation_threshold(self):
"""Test that instantiation works for an array."""
# define the mask
with pytest.raises(TypeError, match=r'.* missing'):
_ = mask.ElevationMask(rcm8cube['eta'][-1, :, :])
def test_default_vals_cube(self):
"""Test that instantiation works for an array."""
# define the mask
elevationmask = mask.ElevationMask(
rcm8cube, t=-1,
elevation_threshold=0)
# make assertions
assert elevationmask._input_flag == 'cube'
assert elevationmask.mask_type == 'elevation'
assert elevationmask._mask.dtype == bool
def test_default_vals_cubewithmeta(self):
"""Test that instantiation works for an array."""
# define the mask
elevationmask = mask.ElevationMask(
golfcube, t=-1,
elevation_threshold=0)
# make assertions
assert elevationmask._input_flag == 'cube'
assert elevationmask.mask_type == 'elevation'
assert elevationmask._mask.dtype == bool
# compare with another instantiated from array
elevationmask_comp = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
assert np.all(elevationmask_comp.mask == elevationmask.mask)
# try with a different elevation_threshold (higher)
elevationmask_higher = mask.ElevationMask(
golfcube, t=-1,
elevation_threshold=0.5)
assert (np.sum(elevationmask_higher.integer_mask) <
np.sum(elevationmask.integer_mask))
def test_default_vals_cube_needs_elevation_threshold(self):
"""Test that instantiation works for an array."""
# define the mask
with pytest.raises(TypeError, match=r'.* missing'):
_ = mask.ElevationMask(
rcm8cube, t=-1)
with pytest.raises(TypeError, match=r'.* missing'):
_ = mask.ElevationMask(
golfcube, t=-1)
def test_default_vals_mask_notimplemented(self):
"""Test that instantiation works for an array."""
# define the mask
_ElevationMask = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
with pytest.raises(NotImplementedError,
match=r'Cannot instantiate .*'):
_ = mask.ElevationMask(
_ElevationMask,
elevation_threshold=0)
def test_submergedLand(self):
"""Check what happens when there is no land above water."""
# define the mask
elevationmask = mask.ElevationMask(
rcm8cube['eta'][0, :, :],
elevation_threshold=0)
        # assert - expect all True values up to a certain row
_whr_land = np.where(elevationmask._mask[:, 0])
assert _whr_land[0].size > 0 # if fails, no land found!
_row = int(_whr_land[0][-1]) + 1 # last index
third = elevationmask.shape[1]//3 # limit to left of inlet
assert np.all(elevationmask._mask[:_row, :third] == 1)
assert np.all(elevationmask._mask[_row:, :] == 0)
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_static_from_array(self):
"""Test that instantiation works for an array."""
# define the mask
elevationmask = mask.ElevationMask.from_array(np.ones((100, 200)))
# make assertions
assert elevationmask._input_flag == 'elevation'
class TestFlowMask:
"""Tests associated with the mask.LandMask class."""
def test_default_vals_array(self):
"""Test that instantiation works for an array."""
# define the mask
flowmask = mask.FlowMask(
golfcube['velocity'][-1, :, :],
flow_threshold=0.3)
# make assertions
assert flowmask._input_flag == 'array'
assert flowmask.mask_type == 'flow'
assert flowmask.flow_threshold == 0.3
assert flowmask.threshold == 0.3
assert flowmask.flow_threshold is flowmask.threshold
assert flowmask._mask.dtype == bool
# note that, the mask will take any array though...
# define the mask
flowmask_any = mask.FlowMask(
golfcube['eta'][-1, :, :],
flow_threshold=0)
assert flowmask_any._input_flag == 'array'
assert flowmask_any.mask_type == 'flow'
assert flowmask_any.flow_threshold == 0
assert flowmask_any.threshold == 0
assert flowmask_any.flow_threshold is flowmask_any.threshold
def test_all_below_threshold(self):
flowmask = mask.FlowMask(
golfcube['velocity'][-1, :, :],
flow_threshold=20)
# make assertions
assert flowmask._input_flag == 'array'
assert flowmask.mask_type == 'flow'
assert flowmask.flow_threshold == 20
assert flowmask.threshold == 20
assert flowmask.flow_threshold is flowmask.threshold
assert flowmask._mask.dtype == bool
assert np.all(flowmask.mask == 0)
def test_all_above_threshold(self):
flowmask = mask.FlowMask(
golfcube['velocity'][-1, :, :],
flow_threshold=-5)
# make assertions
assert flowmask._input_flag == 'array'
assert flowmask.mask_type == 'flow'
assert flowmask.flow_threshold == -5
assert flowmask.threshold == -5
assert flowmask.flow_threshold is flowmask.threshold
assert flowmask._mask.dtype == bool
assert np.all(flowmask.mask == 1)
def test_default_vals_array_needs_flow_threshold(self):
"""Test that instantiation works for an array."""
# define the mask
with pytest.raises(TypeError, match=r'.* missing'):
_ = mask.FlowMask(rcm8cube['velocity'][-1, :, :])
def test_default_vals_cube(self):
"""Test that instantiation works for an array."""
# define the mask
flowmask = mask.FlowMask(
rcm8cube, t=-1,
flow_threshold=0.3)
# make assertions
assert flowmask._input_flag == 'cube'
assert flowmask.mask_type == 'flow'
assert flowmask._mask.dtype == bool
def test_vals_cube_different_fields(self):
"""Test that instantiation works for an array."""
# define the mask
velmask = mask.FlowMask(
rcm8cube, t=-1,
cube_key='velocity',
flow_threshold=0.3)
# make assertions
assert velmask._input_flag == 'cube'
assert velmask.mask_type == 'flow'
assert velmask._mask.dtype == bool
dismask = mask.FlowMask(
rcm8cube, t=-1,
cube_key='discharge',
flow_threshold=0.3)
# make assertions
assert dismask._input_flag == 'cube'
assert dismask.mask_type == 'flow'
assert dismask._mask.dtype == bool
assert not np.all(velmask.mask == dismask.mask)
def test_default_vals_cubewithmeta(self):
"""Test that instantiation works
For a cube with metadata.
"""
# define the mask
flowmask = mask.FlowMask(
golfcube, t=-1,
flow_threshold=0.3)
# make assertions
assert flowmask._input_flag == 'cube'
assert flowmask.mask_type == 'flow'
assert flowmask._mask.dtype == bool
# compare with another instantiated from array
flowmask_comp = mask.FlowMask(
golfcube['velocity'][-1, :, :],
flow_threshold=0.3)
assert np.all(flowmask_comp.mask == flowmask.mask)
def test_flowthresh_vals_cubewithmeta(self):
# make default
flowmask = mask.FlowMask(
golfcube, t=-1,
flow_threshold=0.3)
# try with a different flow_threshold (higher)
flowmask_higher = mask.FlowMask(
golfcube, t=-1,
flow_threshold=0.5)
assert (np.sum(flowmask_higher.integer_mask) <
np.sum(flowmask.integer_mask))
def test_default_vals_cube_needs_flow_threshold(self):
"""Test that instantiation works for an array."""
# define the mask
with pytest.raises(TypeError, match=r'.* missing'):
_ = mask.FlowMask(
rcm8cube, t=-1)
with pytest.raises(TypeError, match=r'.* missing'):
_ = mask.FlowMask(
golfcube, t=-1)
def test_default_vals_mask_notimplemented(self):
"""Test that instantiation works for an array."""
# define the mask
_ElevationMask = mask.ElevationMask(
golfcube['eta'][-1, :, :],
elevation_threshold=0)
with pytest.raises(NotImplementedError,
match=r'Cannot instantiate .*'):
_ = mask.FlowMask(
_ElevationMask,
flow_threshold=0.3)
def test_submergedLand(self):
"""Check what happens when there is no land above water."""
# define the mask
flowmask = mask.FlowMask(
rcm8cube['velocity'][0, :, :],
flow_threshold=0.3)
        # assert - the flow mask doesn't care about land
assert flowmask.mask_type == 'flow'
@pytest.mark.xfail(raises=NotImplementedError, strict=True,
reason='Have not implemented pathway.')
def test_static_from_array(self):
"""Test that instantiation works for an array."""
# define the mask
        flowmask = mask.FlowMask.from_array(np.ones((100, 200)))
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
from common.utils import load_summary, load_bo_json_log, read_json_file, compute_std_of_mean
# ROOT = "/tank/zxxia/PCC-RL/results_0928/genet_no_reward_scale/genet_bbr_old"
SAVE_ROOTS = ["/datamirror/zxxia/PCC-RL/results_1006/gap_vs_improvement/test",
"/datamirror/zxxia/PCC-RL/results_1006/gap_vs_improvement_pretrained/test",
"/datamirror/zxxia/PCC-RL/results_1006/gap_vs_improvement_1/test"]
CONFIG_ROOTS = ["../../config/gap_vs_improvement",
"../../config/gap_vs_improvement",
"../../config/gap_vs_improvement"]
def compute_improve(fig_idx, config_id, before, after):
if fig_idx == 1 and config_id > 100:
return after - before + 40
return after - before
def load_results(save_dirs, cc='aurora'):
rewards = []
for save_dir in save_dirs:
if not os.path.exists(os.path.join(save_dir, '{}_summary.csv'.format(cc))):
continue
summary = load_summary(os.path.join(save_dir, '{}_summary.csv'.format(cc)))
rewards.append(summary['pkt_level_reward'])
    return np.mean(rewards), np.std(rewards), np.array(rewards)
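# Illustrative usage (paths and config layout are hypothetical): gather the
# per-seed summaries of one config directory and report the reward spread.
# >>> dirs = [os.path.join(SAVE_ROOTS[0], 'config_00', f'seed_{s}') for s in range(3)]
# >>> mean_r, std_r, all_r = load_results(dirs, cc='aurora')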
import numpy as np
from sympy import *
from numpy import transpose
from numpy import linalg as LA
from fractions import Fraction
import time
import matplotlib.pyplot as plt
from matplotlib import rcParams
import random
import copy
import csv
def number(str_input, str_error, str_error2, type_num):  # str_input - prompt shown to the user on input
    # str_error - error text: the value is not a number (a 'string')
    # str_error2 - error text: the number does not meet the stated requirements
    # type_num - all admissible numeric types
    """
    Reads a value typed at the keyboard,
    validates it,
    and prints an error name or returns the number.
    """
print(str_input)
num = input()
if 'i' in num:
num = itojnum(num)
    num = num.replace(" ", "")  # assign the result: str.replace returns a new string
try:
        check = complex(num)  # check that it is a number (complex() parses any numeric string)
except ValueError:
print(str_error)
return number(str_input, str_error, str_error2, type_num)
    if (complex in type_num) and check.imag != 0:  # checks for complex numbers
return jtoinum(num)
elif (complex in type_num) and check.imag == 0:
if (int in type_num):
if check.real == round(check.real):
return str(int(check.real))
if (float in type_num):
if check.real != round(check.real):
return str(float(check.real))
else:
print(str_error2)
return number(str_input, str_error, str_error2, type_num)
    elif (float in type_num):  # checks for real numbers
if check.imag != 0:
print(str_error2)
return number(str_input, str_error, str_error2, type_num)
if (int in type_num):
if check.real == round(check.real):
return str(int(check.real))
else:
return str(float(check.real))
    else:  # checks for integers
if check.imag != 0:
print(str_error2)
return number(str_input, str_error, str_error2, type_num)
elif check.real != round(check.real):
print(str_error2)
return number(str_input, str_error, str_error2, type_num)
return str(int(check.real))
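# Illustrative behavior of number() (the value after 'input:' is what the
# user types at the prompt; returned values are strings):
#   number('x: ', 'err', 'err2', [int])                 input: '5'     -> '5'
#   number('x: ', 'err', 'err2', [int, float])          input: '2.5'   -> '2.5'
#   number('x: ', 'err', 'err2', [complex, float, int]) input: '1+2i'  -> '1+2i'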
# Generates random numbers for the CSV file and the python generator.
def random_numbers(row,minim,maxi):
complex_numb=[]
for i in range(row**3):
floatnump=random.randint(1,6)
numb_of_list=random.randint(1,2)
if numb_of_list==1:
a=random.randint(minim,maxi)
else:
a=round(random.uniform(minim,maxi),floatnump)
numb_of_list=random.randint(1,2)
if numb_of_list==1:
b=random.randint(minim,maxi)
else:
b=round(random.uniform(minim,maxi),floatnump)
complex_numb.append(complex(a,b))
result=[0]*row
for i in range(row):
floatnump=random.randint(1,6)
numb_of_list=random.randint(1,3)
if numb_of_list==1:
result[i]=str(random.randint(minim,maxi))
if numb_of_list==2:
result[i]=str(round(random.uniform(minim,maxi),floatnump))
if numb_of_list==3:
result[i]=str(random.choice(complex_numb))
return result
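# Illustrative call: three random entries drawn from [-5, 5]; each entry is
# randomly an integer, a rounded float, or a complex number rendered by str().
# >>> random_numbers(3, -5, 5)
# ['4', '-2.13', '(1.5-3j)']   # example output; values vary per run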
# Reads a matrix from the keyboard.
def default_matrix():  # N - number of rows, M - number of columns
    """
    Reads the matrix entries from the keyboard,
    remembering each index,
    and returns the values.
    """
    try:
        rowcol = list(map(int, input('Enter the number of rows and columns: ').split()))
        N = rowcol[0]
        M = rowcol[1]
        if len(rowcol) > 2:
            print('Too many values entered. Please try again.')
            return default_matrix()
    except ValueError:
        print('The row and/or column count is not an integer. Please try again.')
        return default_matrix()
    except IndexError:
        print('Too few numbers entered. Please try again.')
        return default_matrix()
    if N == 0 or M == 0:
        print('A zero value was entered! There must be at least 1 row and 1 column!!')
        return default_matrix()
    mtx = [[0] * M for i in range(N)]
    for n in range(N):
        for m in range(M):
            mtx[n][m] = number(f'Enter the value of matrix element a[{n + 1}][{m + 1}]: ',
                               'Invalid expression entered. Please try again',
                               'The number is in the wrong format. Please try again.',
                               [complex, float, int])
    for n in range(len(mtx)):
        #mtx[n].append('|')
        mtx[n].append(number(f'Enter the constant term of row {n + 1}: ',
                             'Invalid expression entered. Please try again',
                             'The number is in the wrong format. Please try again.',
                             [complex, float, int]))
return mtx
# Builds a matrix with python's random generator.
def python_generator():
    """
    Reads the number of rows and columns,
    builds a random matrix,
    and returns it.
    """
    try:
        rowcol = list(map(int, input('Enter the number of rows and columns (N M): ').split()))
        N = rowcol[0]
        M = rowcol[1]
        if len(rowcol) > 2:
            print('Too many values entered. Please try again.')
            return python_generator()
    except ValueError:
        print('The row and/or column count is not an integer. Please try again.')
        return python_generator()
    except IndexError:
        print('Too few numbers entered. Please try again.')
        return python_generator()
    if N == 0 or M == 0:
        print('A zero value was entered! There must be at least 1 row and 1 column!!')
        return python_generator()
    try:
        minmax = list(map(int, input('Enter the minimum and maximum value of a matrix element (also for the imaginary part of a complex number) (min max): ').split()))
        mini = minmax[0]
        maxi = minmax[1]
    except ValueError:
        print('Input error. Please try again.')
        return python_generator()
    except IndexError:
        print('Too few numbers entered. Please try again.')
        return python_generator()
    if mini > maxi:
        print(f'The minimum cannot exceed the maximum ({mini}!>{maxi})!!')
        return python_generator()
result=[]
for i in range(M):
result.append(random_numbers(N,mini,maxi))
for row in range(len(result)):
#result[row].append('|')
result[row].append(random_numbers(1,mini,maxi))
result[row][-1]=str(result[row][-1][0])
result=jtoi(result)
result=del_bracket(result)
return result
# Builds a matrix via a CSV file.
def csv_generator():
    """
    Reads the number of rows and columns,
    writes and re-reads a CSV file,
    and returns the values.
    """
    try:
        rowcol = list(map(int, input('Enter the number of rows and columns (N M): ').split()))
        N = rowcol[0]
        M = rowcol[1]
        if len(rowcol) > 2:
            print('Too many values entered. Please try again.')
            return csv_generator()
    except ValueError:
        print('The row and/or column count is not an integer. Please try again.')
        return csv_generator()
    except IndexError:
        print('Too few numbers entered. Please try again.')
        return csv_generator()
    if N == 0 or M == 0:
        print('A zero value was entered! There must be at least 1 row and 1 column!!')
        return csv_generator()
    try:
        minmax = list(map(int, input('Enter the minimum and maximum value of a matrix element (also for the imaginary part of a complex number) (min max): ').split()))
        mini = minmax[0]
        maxi = minmax[1]
    except ValueError:
        print('Input error. Please try again.')
        return csv_generator()
    except IndexError:
        print('Too few numbers entered. Please try again.')
        return csv_generator()
    if mini > maxi:
        print(f'The minimum cannot exceed the maximum ({mini}!>{maxi})!!')
        return csv_generator()
result=[]
for i in range(M):
result.append(random_numbers(N,mini,maxi))
for row in range(len(result)):
#result[row].append('|')
result[row].append(random_numbers(1,mini,maxi))
result[row][-1]=str(result[row][-1][0])
result=jtoi(result)
result=del_bracket(result)
with open('Answer_file.csv','w',newline='') as csvfile:
writer=csv.writer(csvfile,delimiter=';')
for row in result:
writer.writerow(row)
Matrix_in=[]
with open('Answer_file.csv',newline='') as csvfile:
reader = csv.reader(csvfile,delimiter=';')
Matrix_in=[]
for row in reader:
Matrix_in.append(list(row))
return Matrix_in
# Функция преобразования "i" в "j" для списка
def itoj(mtx):
ans = []
for i in range(len(mtx)):
temp = []
y = mtx[i]
for j in y:
temp.append(j.replace('i','j'))
ans.append(temp)
return ans
# Функция преобразования "j)" в "i" для списка.
def jtoi(mtx):
ans = []
for i in range(len(mtx)):
temp = []
y = mtx[i]
for j in y:
temp.append(j.replace('j)','i'))
ans.append(temp)
return ans
# Removes the left bracket from every string of a list.
def del_bracket(mtx):
ans = []
for i in range(len(mtx)):
temp = []
y = mtx[i]
for j in y:
temp.append(j.replace('(',''))
ans.append(temp)
return ans
# Функция преобразования "i" в "j" для строки.
def itojnum(st):
ans = ''
for i in st:
ans += i.replace('i','j')
return ans
# Функция преобразования "j" в "i" для строки.
def jtoinum(st):
ans = ''
for i in st:
ans += i.replace('j','i')
return ans
# Lets the user choose how to enter the matrix and dispatches accordingly.
def iteration():
    print("How do you want to enter the matrix:\n 1 - From the keyboard\n 2 - Random generation in python\n 3 - CSV file")
    try:
        choice = int(input('You entered: '))
        choices_dict = {1: default_matrix, 2: python_generator, 3: csv_generator}
        mtx = choices_dict[choice]()
    except KeyError:
        print('Invalid matrix input method. Please try again.')
        return iteration()
    except ValueError:
        print('Invalid matrix input method. Please try again.')
        return iteration()
return mtx
# Converts string entries to complex numbers.
def str_to_complex(mtx1):
mtx = copy.deepcopy(mtx1)
for row in mtx:
for i in range(len(row)):
row[i]=complex(itojnum(row[i]))
return(mtx)
# Converts a complex number to the narrowest suitable numeric type.
def complex_to_num(st):
if st.imag==0:
if round(st.real)==st.real:
return int(st.real)
else:
return float(st.real)
else:
return complex(st.real, st.imag)
# Turns string entries into exact fractions.
def numbers_to_fractions(mtx):
    for row in range(len(mtx)):
        for col in range(len(mtx[row])):
            if 'i' in mtx[row][col]:
                return 'This function does not work with complex numbers'
mtx[row][col]=Fraction(mtx[row][col])
return mtx
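# Example: exact fractions from decimal strings (complex entries are refused):
# >>> numbers_to_fractions([['0.5', '2'], ['1.25', '3']])
# [[Fraction(1, 2), Fraction(2, 1)], [Fraction(5, 4), Fraction(3, 1)]]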
# Computes the determinant of a matrix.
def det_my_matrix(mtx):
Lmtx=len(mtx)
if Lmtx==1:
return mtx[0][0]
if Lmtx==2:
return mtx[0][0]*mtx[1][1]-(mtx[0][1]*mtx[1][0])
result=0
for i in range(Lmtx):
factor=1
if i % 2:
factor=-1
mtx2=[]
for row in range(Lmtx):
mtx3=[]
for col in range(Lmtx):
if row!=0 and col!=i:
mtx3.append(mtx[row][col])
if mtx3:
mtx2.append(mtx3)
result+=factor*mtx[0][i]*det_my_matrix(mtx2)
return(result)
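# Examples of the cofactor expansion along the first row:
# >>> det_my_matrix([[1, 2], [3, 4]])
# -2
# >>> det_my_matrix([[2, 0, 0], [0, 3, 0], [0, 0, 4]])
# 24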
# Computes the inverse matrix.
def inverse_matrix(mtx):
Lmtx = len(mtx)
mult = det_my_matrix(mtx)
if mult == 0:
        return 'The matrix is singular'
ans = [[0] * Lmtx for i in range(Lmtx)]
for i in range(Lmtx):
for j in range(Lmtx):
factor=1
if (i+j) % 2:
factor=-1
mtx2 = []
for i1 in range(Lmtx):
if i1 != i:
mtx3 = []
for j1 in range(Lmtx):
if j1 != j:
mtx3.append(mtx[i1][j1])
mtx2.append(mtx3)
ans[j][i] = factor * det_my_matrix(mtx2) / mult
return ans
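# Example: the inverse of a diagonal matrix inverts each diagonal entry
# (computed here via the adjugate divided by the determinant):
# >>> inverse_matrix([[2, 0], [0, 4]])
# [[0.5, 0.0], [0.0, 0.25]]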
def diag(mtx1):
mtx = copy.deepcopy(mtx1)
for row in range(len(mtx)):
for col in range(len(mtx[row])):
if row==col:
mtx[row]=list(np.array(mtx[row])/mtx[row][col])
return mtx
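# Example: every row is divided by its own diagonal entry (the whole row is
# rescaled, not just the diagonal element):
# >>> diag([[2, 4], [3, 6]])
# [[1.0, 2.0], [0.5, 1.0]]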
# Extracts the coefficient matrix (all but the last column).
def coeff_mtx(mtx):
mtx1 = []
for i in range(len(mtx)):
mtx1.append(mtx[i][:-1])
return mtx1
# Extracts the vector of constant terms (the last column).
def coeff_vect(mtx):
mtx1 = []
for i in range(len(mtx)):
mtx1.append(mtx[i][-1])
return mtx1
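# Example: splitting an augmented matrix [A | b] into A and b:
# >>> coeff_mtx([[1, 2, 3], [4, 5, 6]])
# [[1, 2], [4, 5]]
# >>> coeff_vect([[1, 2, 3], [4, 5, 6]])
# [3, 6]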
# Runs the Jacobi simple-iteration solver.
def jacobi(arr,x,acc):
arr1 = coeff_mtx(arr)
vect = coeff_vect(arr)
D = np.diag(arr1)
R = arr1 - np.diagflat(D)
x1 = [i for i in x]
x = (vect - np.dot(R,x)) / D
fin = abs(x1 - x)
itr = 0
while max(fin)>=acc:
if itr >= 100:
            return 'The iteration diverges'
itr += 1
x1 = [i for i in x]
x = (vect - np.dot(R,x)) / D
fin = abs(x1 - x)
return x
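# Example: a strictly diagonally dominant augmented system [A | b] converges:
# 4x + y = 6 and x + 3y = 7 have the exact solution x = 1, y = 2.
# >>> jacobi(np.array([[4.0, 1.0, 6.0], [1.0, 3.0, 7.0]]), np.array([0.0, 0.0]), 1e-6)
# array([1., 2.])   # up to the 1e-6 tolerance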
# Jacobi simple-iteration method driver.
def jacobian_method(mtx):
    mtx1 = str_to_complex(mtx)
    coeff = coeff_mtx(mtx1)
    vect = coeff_vect(mtx1)
    n = len(mtx)
    print('Coefficient matrix:')
    for i in range(n):
        print(coeff[i])
    rev = inverse_matrix(coeff)
    print('Inverse of the coefficient matrix:')
    for i in range(n):
        print(rev[i])
    print('Solution of the linear system by the Jacobi simple-iteration method:')
    mtx2 = np.array(mtx1)
    x = np.array([0 for i in range(n)])
    acc = 0.001
    sol = jacobi(mtx2, x, acc)
    print(sol)
    print('Condition number of the coefficient matrix A: ')
    conditional_jac = LA.cond(coeff)
    print(conditional_jac)
import os
import copy
import numpy as np
from itertools import groupby
from .utils_def import totim_to_datetime
from . import import_optional_dependency
class ZoneBudget:
"""
ZoneBudget class
Parameters
----------
cbc_file : str or CellBudgetFile object
The file name or CellBudgetFile object for which budgets will be
computed.
z : ndarray
The array containing to zones to be used.
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
The kstp and kper values are zero based.
totim : float
The simulation time.
aliases : dict
A dictionary with key, value pairs of zones and aliases. Replaces
the corresponding record and field names with the aliases provided.
When using this option in conjunction with a list of zones, the
zone(s) passed may either be all strings (aliases), all integers,
or mixed.
Returns
-------
None
Examples
--------
>>> from flopy.utils.zonbud import ZoneBudget
>>> zon = ZoneBudget.read_zone_file('zone_input_file')
>>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0))
>>> zb.to_csv('zonebudtest.csv')
>>> zb_mgd = zb * 7.48052 / 1000000
"""
def __init__(
self,
cbc_file,
z,
kstpkper=None,
totim=None,
aliases=None,
verbose=False,
**kwargs,
):
from .binaryfile import CellBudgetFile
if isinstance(cbc_file, CellBudgetFile):
self.cbc = cbc_file
elif isinstance(cbc_file, str) and os.path.isfile(cbc_file):
self.cbc = CellBudgetFile(cbc_file)
else:
raise Exception(f"Cannot load cell budget file: {cbc_file}.")
if isinstance(z, np.ndarray):
assert np.issubdtype(
z.dtype, np.integer
), "Zones dtype must be integer"
else:
e = (
"Please pass zones as a numpy ndarray of (positive)"
" integers. {}".format(z.dtype)
)
raise Exception(e)
# Check for negative zone values
if np.any(z < 0):
raise Exception(
"Negative zone value(s) found:", np.unique(z[z < 0])
)
self.dis = None
if "model" in kwargs.keys():
self.model = kwargs.pop("model")
self.dis = self.model.dis
if "dis" in kwargs.keys():
self.dis = kwargs.pop("dis")
if len(kwargs.keys()) > 0:
args = ",".join(kwargs.keys())
raise Exception(f"LayerFile error: unrecognized kwargs: {args}")
# Check the shape of the cbc budget file arrays
self.cbc_shape = self.cbc.get_data(idx=0, full3D=True)[0].shape
self.nlay, self.nrow, self.ncol = self.cbc_shape
self.cbc_times = self.cbc.get_times()
self.cbc_kstpkper = self.cbc.get_kstpkper()
self.kstpkper = None
self.totim = None
if kstpkper is not None:
if isinstance(kstpkper, tuple):
kstpkper = [kstpkper]
for kk in kstpkper:
s = f"The specified time step/stress period does not exist {kk}"
assert kk in self.cbc.get_kstpkper(), s
self.kstpkper = kstpkper
elif totim is not None:
if isinstance(totim, float):
totim = [totim]
elif isinstance(totim, int):
totim = [float(totim)]
for t in totim:
s = f"The specified simulation time does not exist {t}"
assert t in self.cbc.get_times(), s
self.totim = totim
else:
            # No time step/stress period or simulation time passed
self.kstpkper = self.cbc.get_kstpkper()
# Set float and integer types
self.float_type = np.float32
self.int_type = np.int32
# Check dimensions of input zone array
s = (
"Row/col dimensions of zone array {}"
" do not match model row/col dimensions {}".format(
z.shape, self.cbc_shape
)
)
assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s
if z.shape == self.cbc_shape:
izone = z.copy()
elif len(z.shape) == 2:
izone = np.zeros(self.cbc_shape, self.int_type)
izone[:] = z[:, :]
elif len(z.shape) == 3 and z.shape[0] == 1:
izone = np.zeros(self.cbc_shape, self.int_type)
izone[:] = z[0, :, :]
else:
e = f"Shape of the zone array is not recognized: {z.shape}"
raise Exception(e)
self.izone = izone
self.allzones = np.unique(izone)
self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones}
if aliases is not None:
s = (
"Input aliases not recognized. Please pass a dictionary "
"with key,value pairs of zone/alias."
)
assert isinstance(aliases, dict), s
# Replace the relevant field names (ignore zone 0)
seen = []
for z, a in iter(aliases.items()):
if z != 0 and z in self._zonenamedict.keys():
if z in seen:
raise Exception(
"Zones may not have more than 1 alias."
)
self._zonenamedict[z] = "_".join(a.split())
seen.append(z)
# self._iflow_recnames = self._get_internal_flow_record_names()
# All record names in the cell-by-cell budget binary file
self.record_names = [
n.strip() for n in self.cbc.get_unique_record_names(decode=True)
]
# Get imeth for each record in the CellBudgetFile record list
self.imeth = {}
for record in self.cbc.recordarray:
self.imeth[record["text"].strip().decode("utf-8")] = record[
"imeth"
]
# INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES.
# CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS
# ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF FLOW.
# SWIADDTO--- terms are used by the SWI2 groundwater flow process.
internal_flow_terms = [
"CONSTANT HEAD",
"FLOW RIGHT FACE",
"FLOW FRONT FACE",
"FLOW LOWER FACE",
"SWIADDTOCH",
"SWIADDTOFRF",
"SWIADDTOFFF",
"SWIADDTOFLF",
]
# Source/sink/storage term record names
# These are all of the terms that are not related to constant
# head cells or face flow terms
self.ssst_record_names = [
n for n in self.record_names if n not in internal_flow_terms
]
# Initialize budget recordarray
array_list = []
if self.kstpkper is not None:
for kk in self.kstpkper:
recordarray = self._initialize_budget_recordarray(
kstpkper=kk, totim=None
)
array_list.append(recordarray)
elif self.totim is not None:
for t in self.totim:
recordarray = self._initialize_budget_recordarray(
kstpkper=None, totim=t
)
array_list.append(recordarray)
self._budget = np.concatenate(array_list, axis=0)
# Update budget record array
if self.kstpkper is not None:
for kk in self.kstpkper:
if verbose:
s = (
"Computing the budget for"
" time step {} in stress period {}".format(
kk[0] + 1, kk[1] + 1
)
)
print(s)
self._compute_budget(kstpkper=kk)
elif self.totim is not None:
for t in self.totim:
if verbose:
s = f"Computing the budget for time {t}"
print(s)
self._compute_budget(totim=t)
def _compute_budget(self, kstpkper=None, totim=None):
"""
Creates a budget for the specified zone array. This function only
supports the use of a single time step/stress period or time.
Parameters
----------
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
None
"""
# Initialize an array to track where the constant head cells
# are located.
ich = np.zeros(self.cbc_shape, self.int_type)
swiich = np.zeros(self.cbc_shape, self.int_type)
if "CONSTANT HEAD" in self.record_names:
"""
C-----CONSTANT-HEAD FLOW -- DON'T ACCUMULATE THE CELL-BY-CELL VALUES FOR
C-----CONSTANT-HEAD FLOW BECAUSE THEY MAY INCLUDE PARTIALLY CANCELING
C-----INS AND OUTS. USE CONSTANT-HEAD TERM TO IDENTIFY WHERE CONSTANT-
C-----HEAD CELLS ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF
C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY.
"""
chd = self.cbc.get_data(
text="CONSTANT HEAD",
full3D=True,
kstpkper=kstpkper,
totim=totim,
)[0]
ich[np.ma.where(chd != 0.0)] = 1
if "FLOW RIGHT FACE" in self.record_names:
self._accumulate_flow_frf("FLOW RIGHT FACE", ich, kstpkper, totim)
if "FLOW FRONT FACE" in self.record_names:
self._accumulate_flow_fff("FLOW FRONT FACE", ich, kstpkper, totim)
if "FLOW LOWER FACE" in self.record_names:
self._accumulate_flow_flf("FLOW LOWER FACE", ich, kstpkper, totim)
if "SWIADDTOCH" in self.record_names:
swichd = self.cbc.get_data(
text="SWIADDTOCH", full3D=True, kstpkper=kstpkper, totim=totim
)[0]
swiich[swichd != 0] = 1
if "SWIADDTOFRF" in self.record_names:
self._accumulate_flow_frf("SWIADDTOFRF", swiich, kstpkper, totim)
if "SWIADDTOFFF" in self.record_names:
self._accumulate_flow_fff("SWIADDTOFFF", swiich, kstpkper, totim)
if "SWIADDTOFLF" in self.record_names:
self._accumulate_flow_flf("SWIADDTOFLF", swiich, kstpkper, totim)
# NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE
# ACCUMULATE THE FLOW BY ZONE
# iterate over remaining items in the list
for recname in self.ssst_record_names:
self._accumulate_flow_ssst(recname, kstpkper, totim)
# Compute mass balance terms
self._compute_mass_balance(kstpkper, totim)
return
def _add_empty_record(
self, recordarray, recname, kstpkper=None, totim=None
):
"""
Build an empty records based on the specified flow direction and
record name for the given list of zones.
Parameters
----------
recordarray :
recname :
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
recordarray : np.recarray
"""
if kstpkper is not None:
if len(self.cbc_times) > 0:
totim = self.cbc_times[self.cbc_kstpkper.index(kstpkper)]
else:
totim = 0.0
elif totim is not None:
if len(self.cbc_times) > 0:
kstpkper = self.cbc_kstpkper[self.cbc_times.index(totim)]
else:
kstpkper = (0, 0)
row = [totim, kstpkper[0], kstpkper[1], recname]
row += [0.0 for _ in self._zonenamedict.values()]
recs = np.array(tuple(row), dtype=recordarray.dtype)
recordarray = np.append(recordarray, recs)
return recordarray
def _initialize_budget_recordarray(self, kstpkper=None, totim=None):
"""
Initialize the budget record array which will store all of the
fluxes in the cell-budget file.
Parameters
----------
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
"""
# Create empty array for the budget terms.
dtype_list = [
("totim", "<f4"),
("time_step", "<i4"),
("stress_period", "<i4"),
("name", (str, 50)),
]
dtype_list += [
(n, self.float_type) for n in self._zonenamedict.values()
]
dtype = np.dtype(dtype_list)
recordarray = np.array([], dtype=dtype)
# Add "from" records
if "STORAGE" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "FROM_STORAGE", kstpkper, totim
)
if "CONSTANT HEAD" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "FROM_CONSTANT_HEAD", kstpkper, totim
)
for recname in self.ssst_record_names:
if recname != "STORAGE":
recordarray = self._add_empty_record(
recordarray,
"FROM_" + "_".join(recname.split()),
kstpkper,
totim,
)
for z, n in self._zonenamedict.items():
if z == 0 and 0 not in self.allzones:
continue
else:
recordarray = self._add_empty_record(
recordarray, "FROM_" + "_".join(n.split()), kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "TOTAL_IN", kstpkper, totim
)
# Add "out" records
if "STORAGE" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "TO_STORAGE", kstpkper, totim
)
if "CONSTANT HEAD" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "TO_CONSTANT_HEAD", kstpkper, totim
)
for recname in self.ssst_record_names:
if recname != "STORAGE":
recordarray = self._add_empty_record(
recordarray,
"TO_" + "_".join(recname.split()),
kstpkper,
totim,
)
for z, n in self._zonenamedict.items():
if z == 0 and 0 not in self.allzones:
continue
else:
recordarray = self._add_empty_record(
recordarray, "TO_" + "_".join(n.split()), kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "TOTAL_OUT", kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "IN-OUT", kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "PERCENT_DISCREPANCY", kstpkper, totim
)
return recordarray
@staticmethod
def _filter_circular_flow(fz, tz, f):
"""
Parameters
----------
fz
tz
f
Returns
-------
"""
e = np.equal(fz, tz)
fz = fz[np.logical_not(e)]
tz = tz[np.logical_not(e)]
f = f[np.logical_not(e)]
return fz, tz, f
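    # Example: entries whose "from" and "to" zones coincide are dropped,
    # e.g. fz=[1, 1, 2], tz=[1, 2, 2], f=[5., 6., 7.] keeps only the middle
    # entry and returns (array([1]), array([2]), array([6.])).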
def _update_budget_fromfaceflow(
self, fz, tz, f, kstpkper=None, totim=None
):
"""
Parameters
----------
fz
tz
f
kstpkper
totim
Returns
-------
"""
# No circular flow within zones
fz, tz, f = self._filter_circular_flow(fz, tz, f)
if len(f) == 0:
return
# Inflows
idx = tz != 0
fzi = fz[idx]
tzi = tz[idx]
rownames = ["FROM_" + self._zonenamedict[z] for z in fzi]
colnames = [self._zonenamedict[z] for z in tzi]
fluxes = f[idx]
self._update_budget_recordarray(
rownames, colnames, fluxes, kstpkper, totim
)
# Outflows
idx = fz != 0
fzi = fz[idx]
tzi = tz[idx]
rownames = ["TO_" + self._zonenamedict[z] for z in tzi]
colnames = [self._zonenamedict[z] for z in fzi]
fluxes = f[idx]
self._update_budget_recordarray(
rownames, colnames, fluxes, kstpkper, totim
)
return
def _update_budget_fromssst(self, fz, tz, f, kstpkper=None, totim=None):
"""
Parameters
----------
fz
tz
f
kstpkper
totim
Returns
-------
"""
if len(f) == 0:
return
self._update_budget_recordarray(fz, tz, f, kstpkper, totim)
return
def _update_budget_recordarray(
self, rownames, colnames, fluxes, kstpkper=None, totim=None
):
"""
Update the budget record array with the flux for the specified
flow direction (in/out), record name, and column.
Parameters
----------
rownames
colnames
fluxes
kstpkper
totim
Returns
-------
None
"""
try:
if kstpkper is not None:
for rn, cn, flux in zip(rownames, colnames, fluxes):
rowidx = np.where(
(self._budget["time_step"] == kstpkper[0])
& (self._budget["stress_period"] == kstpkper[1])
& (self._budget["name"] == rn)
)
self._budget[cn][rowidx] += flux
elif totim is not None:
for rn, cn, flux in zip(rownames, colnames, fluxes):
rowidx = np.where(
(self._budget["totim"] == totim)
& (self._budget["name"] == rn)
)
self._budget[cn][rowidx] += flux
except Exception as e:
print(e)
raise
return
def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.ncol >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW RIGHT FACE" COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS.
# COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM
# ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4.
# 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K
k, i, j = np.where(
self.izone[:, :, 1:] > self.izone[:, :, :-1]
)
# Adjust column values to account for the starting position of "nz"
j += 1
# Define the zone to which flow is going
nz = self.izone[k, i, j]
# Define the zone from which flow is coming
jl = j - 1
nzl = self.izone[k, i, jl]
# Get the face flow
q = data[k, i, jl]
# Get indices where flow face values are positive (flow out of higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# FLOW BETWEEN NODE J,I,K AND J+1,I,K
k, i, j = np.where(
self.izone[:, :, :-1] > self.izone[:, :, 1:]
)
# Define the zone from which flow is coming
nz = self.izone[k, i, j]
# Define the zone to which flow is going
jr = j + 1
nzr = self.izone[k, i, jr]
# Get the face flow
q = data[k, i, j]
# Get indices where flow face values are positive (flow out of higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[j > 0], i[j > 0], j[j > 0]
jl = j - 1
nzl = self.izone[k, i, jl]
nz = self.izone[k, i, j]
q = data[k, i, jl]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi[tzi != 0]]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[j < self.ncol - 1],
i[j < self.ncol - 1],
j[j < self.ncol - 1],
)
nz = self.izone[k, i, j]
jr = j + 1
nzr = self.izone[k, i, jr]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.nrow >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW FRONT FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
k, i, j = np.where(
self.izone[:, 1:, :] < self.izone[:, :-1, :]
)
i += 1
ia = i - 1
nza = self.izone[k, ia, j]
nz = self.izone[k, i, j]
q = data[k, ia, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K.
k, i, j = np.where(
self.izone[:, :-1, :] < self.izone[:, 1:, :]
)
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[i > 0], i[i > 0], j[i > 0]
ia = i - 1
nza = self.izone[k, ia, j]
nz = self.izone[k, i, j]
q = data[k, ia, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[i < self.nrow - 1],
i[i < self.nrow - 1],
j[i < self.nrow - 1],
)
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.nlay >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW LOWER FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
k, i, j = np.where(
self.izone[1:, :, :] < self.izone[:-1, :, :]
)
k += 1
ka = k - 1
nza = self.izone[ka, i, j]
nz = self.izone[k, i, j]
q = data[ka, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
k, i, j = np.where(
self.izone[:-1, :, :] < self.izone[1:, :, :]
)
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[k > 0], i[k > 0], j[k > 0]
ka = k - 1
nza = self.izone[ka, i, j]
nz = self.izone[k, i, j]
q = data[ka, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[k < self.nlay - 1],
i[k < self.nlay - 1],
j[k < self.nlay - 1],
)
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_ssst(self, recname, kstpkper, totim):
# NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE
# ACCUMULATE THE FLOW BY ZONE
imeth = self.imeth[recname]
data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)
if len(data) == 0:
# Empty data, can occur during the first time step of a transient
# model when storage terms are zero and not in the cell-budget
# file.
return
else:
data = data[0]
if imeth == 2 or imeth == 5:
# LIST
qin = np.ma.zeros(
(self.nlay * self.nrow * self.ncol), self.float_type
)
qout = np.ma.zeros(
(self.nlay * self.nrow * self.ncol), self.float_type
)
for [node, q] in zip(data["node"], data["q"]):
idx = node - 1
if q > 0:
qin.data[idx] += q
elif q < 0:
qout.data[idx] += q
qin = np.ma.reshape(qin, (self.nlay, self.nrow, self.ncol))
qout = np.ma.reshape(qout, (self.nlay, self.nrow, self.ncol))
elif imeth == 0 or imeth == 1:
# FULL 3-D ARRAY
qin = np.ma.zeros(self.cbc_shape, self.float_type)
qout = np.ma.zeros(self.cbc_shape, self.float_type)
qin[data > 0] = data[data > 0]
qout[data < 0] = data[data < 0]
elif imeth == 3:
# 1-LAYER ARRAY WITH LAYER INDICATOR ARRAY
rlay, rdata = data[0], data[1]
data = np.ma.zeros(self.cbc_shape, self.float_type)
for (r, c), l in np.ndenumerate(rlay):
data[l - 1, r, c] = rdata[r, c]
qin = np.ma.zeros(self.cbc_shape, self.float_type)
qout = np.ma.zeros(self.cbc_shape, self.float_type)
qin[data > 0] = data[data > 0]
qout[data < 0] = data[data < 0]
elif imeth == 4:
# 1-LAYER ARRAY THAT DEFINES LAYER 1
qin = np.ma.zeros(self.cbc_shape, self.float_type)
qout = np.ma.zeros(self.cbc_shape, self.float_type)
r, c = np.where(data > 0)
qin[0, r, c] = data[r, c]
r, c = np.where(data < 0)
qout[0, r, c] = data[r, c]
else:
# Should not happen
raise Exception(
f'Unrecognized "imeth" for {recname} record: {imeth}'
)
# Inflows
fz = []
tz = []
f = []
for z in self.allzones:
if z != 0:
flux = qin[(self.izone == z)].sum()
if type(flux) == np.ma.core.MaskedConstant:
flux = 0.0
fz.append("FROM_" + "_".join(recname.split()))
tz.append(self._zonenamedict[z])
f.append(flux)
fz = np.array(fz)
tz = np.array(tz)
f = np.array(f)
self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
# Outflows
fz = []
tz = []
f = []
for z in self.allzones:
if z != 0:
flux = qout[(self.izone == z)].sum()
if type(flux) == np.ma.core.MaskedConstant:
flux = 0.0
fz.append("TO_" + "_".join(recname.split()))
tz.append(self._zonenamedict[z])
f.append(flux)
fz = np.array(fz)
tz = np.array(tz)
f = np.array(f)
self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
def _compute_mass_balance(self, kstpkper, totim):
# Returns a record array with total inflow, total outflow,
# and percent error summed by column.
skipcols = ["time_step", "stress_period", "totim", "name"]
# Compute inflows
recnames = self.get_record_names()
innames = [n for n in recnames if n.startswith("FROM_")]
outnames = [n for n in recnames if n.startswith("TO_")]
if kstpkper is not None:
rowidx = np.where(
(self._budget["time_step"] == kstpkper[0])
& (self._budget["stress_period"] == kstpkper[1])
& np.in1d(self._budget["name"], innames)
)
elif totim is not None:
rowidx = np.where(
(self._budget["totim"] == totim)
& np.in1d(self._budget["name"], innames)
)
a = _numpyvoid2numeric(
self._budget[list(self._zonenamedict.values())][rowidx]
)
intot = np.array(a.sum(axis=0))
tz = np.array(
list([n for n in self._budget.dtype.names if n not in skipcols])
)
fz = np.array(["TOTAL_IN"] * len(tz))
self._update_budget_fromssst(fz, tz, intot, kstpkper, totim)
# Compute outflows
if kstpkper is not None:
rowidx = np.where(
(self._budget["time_step"] == kstpkper[0])
& (self._budget["stress_period"] == kstpkper[1])
& np.in1d(self._budget["name"], outnames)
)
elif totim is not None:
rowidx = np.where(
(self._budget["totim"] == totim)
& np.in1d(self._budget["name"], outnames)
)
a = _numpyvoid2numeric(
self._budget[list(self._zonenamedict.values())][rowidx]
)
outot = np.array(a.sum(axis=0))
tz = np.array(
list([n for n in self._budget.dtype.names if n not in skipcols])
)
fz = np.array(["TOTAL_OUT"] * len(tz))
self._update_budget_fromssst(fz, tz, outot, kstpkper, totim)
# Compute IN-OUT
tz = np.array(
list([n for n in self._budget.dtype.names if n not in skipcols])
)
f = intot - outot
fz = np.array(["IN-OUT"] * len(tz))
self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
# Compute percent discrepancy
tz = np.array(
list([n for n in self._budget.dtype.names if n not in skipcols])
)
fz = np.array(["PERCENT_DISCREPANCY"] * len(tz))
in_minus_out = intot - outot
in_plus_out = intot + outot
f = 100 * in_minus_out / (in_plus_out / 2.0)
self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim)
def get_model_shape(self):
"""Get model shape
Returns
-------
nlay : int
Number of layers
nrow : int
Number of rows
ncol : int
Number of columns
"""
return self.nlay, self.nrow, self.ncol
def get_record_names(self, stripped=False):
"""
Get a list of water budget record names in the file.
Returns
-------
out : list of strings
List of unique text names in the binary file.
Examples
--------
>>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0))
>>> recnames = zb.get_record_names()
"""
return _get_record_names(self._budget, stripped=stripped)
def get_budget(self, names=None, zones=None, net=False, pivot=False):
"""
Get a list of zonebudget record arrays.
Parameters
----------
names : list of strings
A list of strings containing the names of the records desired.
zones : list of ints or strings
A list of integer zone numbers or zone names desired.
net : boolean
If True, returns net IN-OUT for each record.
pivot : boolean
If True, returns data in a more user friendly format
Returns
-------
budget_list : list of record arrays
A list of the zonebudget record arrays.
Examples
--------
>>> names = ['FROM_CONSTANT_HEAD', 'RIVER_LEAKAGE_OUT']
>>> zones = ['ZONE_1', 'ZONE_2']
>>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0))
>>> bud = zb.get_budget(names=names, zones=zones)
"""
recarray = _get_budget(
self._budget, self._zonenamedict, names=names, zones=zones, net=net
)
if pivot:
recarray = _pivot_recarray(recarray)
return recarray
def get_volumetric_budget(
self, modeltime, recarray=None, extrapolate_kper=False
):
"""
Method to generate a volumetric budget table based on flux information
Parameters
----------
modeltime : flopy.discretization.ModelTime object
ModelTime object for calculating volumes
recarray : np.recarray
optional, user can pass in a numpy recarray to calculate volumetric
budget. recarray must be pivoted before passing to
get_volumetric_budget
extrapolate_kper : bool
flag to determine if we fill in data gaps with other
timestep information from the same stress period.
if True, we assume that flux is constant throughout a stress period
and the pandas dataframe returned contains a
volumetric budget per stress period
if False, calculates volumes from available flux data
Returns
-------
pd.DataFrame
"""
if recarray is None:
recarray = self.get_budget(pivot=True)
return _volumetric_flux(recarray, modeltime, extrapolate_kper)
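    # Illustrative usage (the model and its ModelTime object are assumed to
    # exist; `ml` is a hypothetical flopy model):
    # >>> zb = ZoneBudget('zonebudtest.cbc', zon)
    # >>> vol_df = zb.get_volumetric_budget(ml.modeltime, extrapolate_kper=True)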
def to_csv(self, fname):
"""
Saves the budget record arrays to a formatted
comma-separated values file.
Parameters
----------
fname : str
The name of the output comma-separated values file.
Returns
-------
None
"""
# Needs updating to handle the new budget list structure. Write out
# budgets for all kstpkper if kstpkper is None or pass list of
# kstpkper/totim to save particular budgets.
with open(fname, "w") as f:
# Write header
f.write(",".join(self._budget.dtype.names) + "\n")
# Write rows
for rowidx in range(self._budget.shape[0]):
s = (
",".join([str(i) for i in list(self._budget[:][rowidx])])
+ "\n"
)
f.write(s)
return
def get_dataframes(
self,
start_datetime=None,
timeunit="D",
index_key="totim",
names=None,
zones=None,
net=False,
pivot=False,
):
"""
Get pandas dataframes.
Parameters
----------
start_datetime : str
Datetime string indicating the time at which the simulation starts.
timeunit : str
String that indicates the time units used in the model.
index_key : str
Indicates the fields to be used (in addition to "record") in the
resulting DataFrame multi-index.
names : list of strings
A list of strings containing the names of the records desired.
zones : list of ints or strings
A list of integer zone numbers or zone names desired.
net : boolean
If True, returns net IN-OUT for each record.
pivot : bool
If True, returns dataframe in a more user friendly format
Returns
-------
df : Pandas DataFrame
Pandas DataFrame with the budget information.
Examples
--------
>>> from flopy.utils.zonbud import ZoneBudget
>>> zon = ZoneBudget.read_zone_file('zone_input_file')
>>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0))
>>> df = zb.get_dataframes()
"""
recarray = self.get_budget(names, zones, net, pivot=pivot)
return _recarray_to_dataframe(
recarray,
self._zonenamedict,
start_datetime=start_datetime,
timeunit=timeunit,
index_key=index_key,
zones=zones,
pivot=pivot,
)
@classmethod
def _get_otype(cls, fname):
"""
Method to automatically distinguish output type based on the
zonebudget header
Parameters
----------
fname : str
zonebudget output file name
Returns
-------
otype : int
"""
with open(fname) as foo:
line = foo.readline()
if "zonebudget version" in line.lower():
otype = 0
elif "time step" in line.lower():
otype = 1
elif "totim" in line.lower():
otype = 2
else:
raise AssertionError("Cant distinguish output type")
return otype
@classmethod
def read_output(cls, fname, net=False, dataframe=False, **kwargs):
"""
Method to read a zonebudget output file into a recarray or pandas
dataframe
Parameters
----------
fname : str
zonebudget output file name
net : bool
boolean flag for net budget
dataframe : bool
boolean flag to return a pandas dataframe
**kwargs
pivot : bool
start_datetime : str
Datetime string indicating the time at which the simulation
starts. Can be used when pandas dataframe is requested
timeunit : str
String that indicates the time units used in the model.
Returns
-------
np.recarray
"""
otype = ZoneBudget._get_otype(fname)
if otype == 0:
recarray = _read_zb_zblst(fname)
elif otype == 1:
recarray = _read_zb_csv(fname)
else:
add_prefix = kwargs.pop("add_prefix", True)
recarray = _read_zb_csv2(fname, add_prefix=add_prefix)
zonenamdict = {
int(i.split("_")[-1]): i
for i in recarray.dtype.names
if i.startswith("ZONE")
}
pivot = kwargs.pop("pivot", False)
recarray = _get_budget(recarray, zonenamdict, net=net)
if pivot:
recarray = _pivot_recarray(recarray)
if not dataframe:
return recarray
else:
start_datetime = kwargs.pop("start_datetime", None)
timeunit = kwargs.pop("timeunit", "D")
return _recarray_to_dataframe(
recarray,
zonenamdict,
start_datetime=start_datetime,
timeunit=timeunit,
pivot=pivot,
)
@classmethod
def read_zone_file(cls, fname):
"""Method to read a zonebudget zone file into memory
Parameters
----------
fname : str
zone file name
Returns
-------
zones : np.array
"""
with open(fname, "r") as f:
lines = f.readlines()
# Initialize layer
lay = 0
# Initialize data counter
totlen = 0
i = 0
# First line contains array dimensions
dimstring = lines.pop(0).strip().split()
nlay, nrow, ncol = [int(v) for v in dimstring]
zones = np.zeros((nlay, nrow, ncol), dtype=np.int32)
# The number of values to read before placing
# them into the zone array
datalen = nrow * ncol
# List of valid values for LOCAT
locats = ["CONSTANT", "INTERNAL", "EXTERNAL"]
# ITERATE OVER THE ROWS
for line in lines:
rowitems = line.strip().split()
# Skip blank lines
if len(rowitems) == 0:
continue
# HEADER
if rowitems[0].upper() in locats:
vals = []
locat = rowitems[0].upper()
if locat == "CONSTANT":
iconst = int(rowitems[1])
else:
fmt = rowitems[1].strip("()")
fmtin, iprn = [int(v) for v in fmt.split("I")]
# ZONE DATA
else:
if locat == "CONSTANT":
vals = np.ones((nrow, ncol), dtype=int) * iconst
lay += 1
elif locat == "INTERNAL":
# READ ZONES
rowvals = [int(v) for v in rowitems]
s = "Too many values encountered on this line."
assert len(rowvals) <= fmtin, s
vals.extend(rowvals)
elif locat == "EXTERNAL":
# READ EXTERNAL FILE
fname = rowitems[0]
if not os.path.isfile(fname):
errmsg = f'Could not find external file "{fname}"'
raise Exception(errmsg)
with open(fname, "r") as ext_f:
ext_flines = ext_f.readlines()
for ext_frow in ext_flines:
ext_frowitems = ext_frow.strip().split()
rowvals = [int(v) for v in ext_frowitems]
vals.extend(rowvals)
if len(vals) != datalen:
                            errmsg = (
                                "The number of values read ({}) from external "
                                'file "{}" does not match the expected '
                                "number.".format(len(vals), fname)
                            )
raise Exception(errmsg)
else:
# Should not get here
raise Exception(f"Locat not recognized: {locat}")
# IGNORE COMPOSITE ZONES
if len(vals) == datalen:
# place values for the previous layer into the zone array
                vals = np.array(vals, dtype=int)
"""
this module contains a class that fits a gaussian model to the central
part of an histogram, following schwartzman et al, 2009. This is
typically necessary to estimate a fdr when one is not certain that the
data behaves as a standard normal under H_0.
Author : <NAME>, 2008-2009
"""
# For scipy import
import numpy as np
from numpy.linalg import pinv
from mvpa2.base import externals
if externals.exists("scipy", raise_=True):
import scipy.stats as st
class FDR(object):
"""
This is the basic class to handle false discovery rate computation
parameter:
fdr.x the samples from which the fdr is derived
x is assumed to be a normal variate
The Benjamini-Horchberg procedure is used
"""
def __init__(self, x):
"""
x is assumed to be a 1-d array
"""
self.x = np.squeeze(x)
def all_fdr(self, x=None, verbose=0):
"""
Returns all the FDR (false discovery rates) values for the sample x
Parameters
-----------
x : ndarray of shape (n)
The normal variates
Results
-------
fdr : ndarray of shape (n)
The set of all FDRs
"""
if x is None:
x = self.x
pvals = st.norm.sf(x)
return self.all_fdr_from_pvals(pvals, verbose)
def all_fdr_from_pvals(self, pv, verbose=0):
"""
Returns the fdr associated with each the values
Parameters
-----------
pv : ndarray of shape (n)
The samples p-value
Returns
--------
q : array of shape(n)
The corresponding fdrs
"""
        if pv is None:
            pv = st.norm.sf(self.x)
        pv = self.check_pv(pv)
n = np.size(pv)
isx = np.argsort(pv)
q = np.zeros(n)
for ip in range(n):
q[isx[ip]] = np.minimum(
1, np.maximum(n * pv[isx[ip]] / (ip + 1), q[isx[ip]])
)
if ip < n - 1:
q[isx[ip + 1]] = q[isx[ip]]
if verbose:
import matplotlib.pylab as mp
mp.figure()
mp.plot(pv, q, ".")
return q
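    # Worked example of the step-up correction implemented above:
    # pv = [0.01, 0.02, 0.04], n = 3
    #   q[0] = min(1, 3 * 0.01 / 1) = 0.03
    #   q[1] = max(3 * 0.02 / 2, q[0]) = 0.03
    #   q[2] = max(3 * 0.04 / 3, q[1]) = 0.04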
def check_pv(self, pv):
"""
Do some basic checks on the pv array: each value should be within [0,1]
Parameters
----------
pv : array of shape (n)
The sample p-values
Returns
--------
pv : array of shape (n)
The sample p-values
"""
pv = np.squeeze(pv)
if pv.min() < 0:
print(pv.min())
raise ValueError("Negative p-values")
if pv.max() > 1:
print(pv.max())
raise ValueError("P-values greater than 1!")
return pv
def pth_from_pvals(self, pv, alpha=0.05):
"""
Given a set pv of p-values, returns the critical
p-value associated with an FDR alpha
Parameters
-----------
alpha : float
The desired FDR significance
pv : array of shape (n)
The samples p-value
Returns
-------
pth: float
The p value corresponding to the FDR alpha
"""
pv = self.check_pv(pv)
npv = np.size(pv)
pcorr = alpha / npv
spv = np.sort(pv)
ip = 0
pth = 0.0
        while (ip < npv) and (spv[ip] < pcorr * (ip + 1)):  # short-circuit keeps ip in bounds
pth = spv[ip]
ip = ip + 1
return pth
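    # Worked example: pv = [0.001, 0.01, 0.03, 0.5], alpha = 0.05, n = 4, so
    # pcorr = 0.0125. The loop accepts spv[k] while spv[k] < pcorr * (k + 1):
    # 0.001 < 0.0125, 0.01 < 0.025, 0.03 < 0.0375, but 0.5 >= 0.05,
    # hence pth = 0.03.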
def threshold_from_student(self, df, alpha=0.05, x=None):
"""
        Given an array t of student variates with df dofs, returns the
        critical variate threshold associated with an FDR of alpha.
Parameters
-----------
df : float
The number of degrees of freedom
alpha : float, optional
The desired significance
x : ndarray, optional
The variate. By default self.x is used
Returns
--------
th : float
The threshold in variate value
"""
df = float(df)
if x is None:
x = self.x
pvals = st.t.sf(x, df)
pth = self.pth_from_pvals(pvals, alpha)
return st.t.isf(pth, df)
def threshold(self, alpha=0.05, x=None):
"""
        Given an array x of normal variates, this function returns the
        critical variate threshold associated with an FDR of alpha.
        x is explicitly assumed to be normally distributed under H_0
Parameters
-----------
alpha: float, optional
The desired significance, by default 0.05
x : ndarray, optional
The variate. By default self.x is used
Returns
--------
th : float
The threshold in variate value
"""
if x is None:
x = self.x
pvals = st.norm.sf(x)
pth = self.pth_from_pvals(pvals, alpha)
return st.norm.isf(pth)
class ENN(object):
"""
Class to compute the empirical null normal fit to the data.
    The fit is then used to estimate the FDR, assuming a gaussian null,
    following Schwartzman et al., NeuroImage 44 (2009) 71--82
"""
def __init__(self, x):
"""
Initiate an empirical null normal object.
Parameters
-----------
x : 1D ndarray
The data used to estimate the empirical null.
"""
x = np.reshape(x, (-1,))
self.x = np.sort(x)
self.n = np.size(x)
self.learned = 0
def learn(self, left=0.2, right=0.8):
"""
Estimate the proportion, mean and variance of a gaussian distribution
for a fraction of the data
Parameters
-----------
left : float, optional
Left cut parameter to prevent fitting non-gaussian data
right : float, optional
Right cut parameter to prevent fitting non-gaussian data
Notes
------
This method stores the following attributes:
        * mu : mean of the estimated normal distribution
        * p0 = min(1, np.exp(lp0)) : proportion of null samples
        * sqsigma : variance of the estimated normal
          distribution
        * sigma = np.sqrt(sqsigma) : standard deviation of the
          estimated normal distribution
"""
# take a central subsample of x
x = self.x[int(self.n * left) : int(self.n * right)]
# generate the histogram
step = 3.5 * np.std(self.x) / np.exp(np.log(self.n) / 3)
bins = int(max(10, (self.x.max() - self.x.min()) / step))
hist, ledge = np.histogram(x, bins=bins)
        # np.histogram returns bins+1 edges in modern numpy; some older
        # versions returned only the bins left edges, hence the check below
assert len(ledge) in (bins, bins + 1)
if len(ledge) == bins + 1:
# we are interested in left edges
ledge = ledge[:bins]
step = ledge[1] - ledge[0]
medge = ledge + 0.5 * step
# remove null bins
whist = hist > 0
hist = hist[whist]
medge = medge[whist]
hist = hist.astype("f")
# fit the histogram
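        # The log of a Gaussian density is quadratic in x, so regressing
        # log(hist) on [1, x, x**2] with a pseudo-inverse recovers the fit:
        # the quadratic coefficient encodes the variance, the linear one the
        # mean, and the constant term the (log) proportion of null samples.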
DMtx = np.ones((3, np.sum(whist)))
DMtx[1] = medge
DMtx[2] = medge ** 2
coef = np.dot(np.log(hist), pinv(DMtx))
sqsigma = -1.0 / (2 * coef[2])
mu = coef[1] * sqsigma
lp0 = (
coef[0]
- np.log(step * self.n)
            + 0.5 * np.log(2 * np.pi * sqsigma))
from abc import ABC, abstractmethod
import numpy as np
from scipy.stats.mstats import gmean
from dataset.fuzzy_sets import IntervalValuedFuzzySet
class Aggregation(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
"""
:param fuzzy_sets: a numpy array holding fuzzy sets represented directly as numpy arrays
:return: a fuzzy set, a numpy array result of aggregation
"""
pass
def aggregate_interval_valued_fuzzy_sets(self, fuzzy_sets):
"""
:param fuzzy_sets: a numpy array holding fuzzy sets as IntervalValuedFuzzySet class instances
:return: a fuzzy set, result of aggregation
"""
fuzzy_sets_as_numpy = np.array([f.numpy_representation for f in fuzzy_sets])
return self.aggregate_numpy_arrays_representation(fuzzy_sets_as_numpy)
@staticmethod
def change_aggregation_to_name(agg):
if isinstance(agg, A1Aggregation):
return 'A1'
if isinstance(agg, A2Aggregation):
return 'A2'
if isinstance(agg, A3Aggregation):
return 'A3'
if isinstance(agg, A4Aggregation):
return 'A4'
if isinstance(agg, A5Aggregation):
return 'A5'
if isinstance(agg, A6Aggregation):
return 'A6'
if isinstance(agg, A7Aggregation):
return 'A7'
if isinstance(agg, A8Aggregation):
return 'A8'
if isinstance(agg, A9Aggregation):
return 'A9'
if isinstance(agg, A10Aggregation):
return 'A10'
# aggregation names come from the paper
class A1Aggregation(Aggregation):
def __init__(self):
super().__init__()
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
return fuzzy_sets.sum(axis=0) / fuzzy_sets.shape[0]
class A2Aggregation(Aggregation):
def __init__(self):
super().__init__()
def _f(self, sum, upper, lower, n):
sum -= upper
sum += lower
return sum / n
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
summed = fuzzy_sets.sum(axis=0)
t = np.array([self._f(summed[1], f[1], f[0], fuzzy_sets.shape[0]) for f in fuzzy_sets])
return np.array([summed[0] / fuzzy_sets.shape[0], np.max(t)])
class A3Aggregation(Aggregation):
def __init__(self):
super().__init__()
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
summed = fuzzy_sets.sum(axis=0)
# division by zero, here 0/0 = 0
if summed[1] == 0:
return np.array([summed[0] / fuzzy_sets.shape[0], 0])
# standard way
squared = np.square(fuzzy_sets[:, 1])
return np.array([summed[0] / fuzzy_sets.shape[0], np.sum(squared, axis=0) / summed[1]])
class A4Aggregation(Aggregation):
def __init__(self, p):
super().__init__()
self.p = p
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
summed = fuzzy_sets.sum(axis=0)
# division by zero, here 0/0 = 0
if summed[1] == 0:
return np.array([summed[0] / fuzzy_sets.shape[0], 0])
# standard way
powered = np.power(fuzzy_sets[:, 1], self.p)
powered_minus_one = np.power(fuzzy_sets[:, 1], self.p - 1)
return np.array([summed[0] / fuzzy_sets.shape[0], np.sum(powered, axis=0) / np.sum(powered_minus_one)])
class A5Aggregation(Aggregation):
def __init__(self):
super().__init__()
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
lower = np.square(fuzzy_sets[:, 0])
upper = np.power(fuzzy_sets[:, 1], 3)
n = fuzzy_sets.shape[0]
return np.array([np.sqrt(lower.sum(axis=0) / n), np.sqrt(upper.sum(axis=0) / n)])
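# Hedged usage sketch (added illustration, not from the original file): two
# interval-valued fuzzy sets, each a [lower, upper] row, aggregated with the
# arithmetic-mean aggregation A1.
def _example_a1_aggregation():
    sets = np.array([[0.2, 0.6],
                     [0.4, 0.8]])
    print(A1Aggregation().aggregate_numpy_arrays_representation(sets))  # [0.3 0.7]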
class A6Aggregation(Aggregation):
def __init__(self):
super().__init__()
def aggregate_numpy_arrays_representation(self, fuzzy_sets):
        lower = np.power(fuzzy_sets[:, 0], 3)
"""
Implement optics algorithms for optical phase tomography using GPU
<NAME> <EMAIL>
<NAME> <EMAIL>
October 22, 2018
"""
import numpy as np
import arrayfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
"""
Class created for all parameters for tomography solver
"""
def __init__(self):
self.method = "FISTA"
self.stepsize = 1e-2
self.max_iter = 20
self.error = []
self.reg_term = 0.0 #L2 norm
#FISTA
self.fista_global_update = False
self.restart = False
#total variation regularization
self.total_variation = False
self.reg_tv = 1.0 #lambda
self.max_iter_tv = 15
self.order_tv = 1
self.total_variation_gpu = False
#lasso
self.lasso = False
self.reg_lasso = 1.0
#positivity constraint
self.positivity_real = (False, "larger")
self.positivity_imag = (False, "larger")
self.pure_real = False
self.pure_imag = False
#aberration correction
self.pupil_update = False
self.pupil_global_update = False
self.pupil_step_size = 1.0
self.pupil_update_method = "gradient"
#batch gradient update
self.batch_size = 1
#random order update
self.random_order = False
class PhaseObject3D:
"""
Class created for 3D objects.
Depending on the scattering model, one of the following quantities will be used:
- Refractive index (RI)
- Transmittance function (Trans)
- PhaseContrast
- Scattering potential (V)
shape: shape of object to be reconstructed in (x,y,z), tuple
voxel_size: size of each voxel in (x,y,z), tuple
RI_obj: refractive index of object(Optional)
RI: background refractive index (Optional)
slice_separation: For multislice algorithms, how far apart are slices separated, array (Optional)
"""
def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, slice_separation = None):
assert len(shape) == 3, "shape should be 3 dimensional!"
self.shape = shape
self.RI_obj = RI * np.ones(shape, dtype = np_complex_datatype) if RI_obj is None else RI_obj.astype(np_complex_datatype)
self.RI = RI
self.pixel_size = voxel_size[0]
self.pixel_size_z = voxel_size[2]
if slice_separation is not None:
#for discontinuous slices
assert len(slice_separation) == shape[2]-1, "number of separations should match with number of layers!"
self.slice_separation = np.asarray(slice_separation).astype(np_float_datatype)
else:
#for continuous slices
self.slice_separation = self.pixel_size_z * np.ones((shape[2]-1,), dtype = np_float_datatype)
def convertRItoTrans(self, wavelength):
k0 = 2.0 * np.pi / wavelength
self.trans_obj = np.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)
def convertRItoPhaseContrast(self):
self.contrast_obj = self.RI_obj - self.RI
def convertRItoV(self, wavelength):
k0 = 2.0 * np.pi / wavelength
self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)
def convertVtoRI(self, wavelength):
k0 = 2.0 * np.pi / wavelength
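        # Inverts V = k0**2 * (RI**2 - RI_obj**2) for a complex RI_obj:
        # separating real and imaginary parts gives a quadratic in the real
        # part squared (coefficients B and C below), solved with the standard
        # quadratic formula; the imaginary part then follows directly.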
B = -1.0 * (self.RI**2 - self.V_obj.real/k0**2)
C = -1.0 * (-1.0 * self.V_obj.imag/k0**2/2.0)**2
RI_obj_real = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
RI_obj_imag = -0.5 * self.V_obj.imag/k0**2/RI_obj_real
self.RI_obj = RI_obj_real + 1.0j * RI_obj_imag
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illumination angles in x, default = [0] (on axis)
fy_illu_list: illumination angles in y
    rotation_angle_list: angles of rotation in tomography
propagation_distance_list: defocus distances for each illumination
"""
def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
self.phase_obj_3d = phase_obj_3d
self.wavelength = kwargs["wavelength"]
        #Rotation angles and objects
self.rot_angles = rotation_angle_list
self.number_rot = len(self.rot_angles)
self.rotation_pad = kwargs.get("rotation_pad", True)
#Illumination angles
assert len(fx_illu_list) == len(fy_illu_list)
self.fx_illu_list = fx_illu_list
self.fy_illu_list = fy_illu_list
self.number_illum = len(self.fx_illu_list)
        #Aberration object
self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,\
self.wavelength, kwargs["na"], pad = False)
#Defocus distances and object
self.prop_distances = propagation_distance_list
self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
self.number_defocus = len(self.prop_distances)
#Scattering models and algorithms
self._opticsmodel = {"MultiTrans": MultiTransmittance,
"MultiPhaseContrast": MultiPhaseContrast,
}
self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
"FISTA": self._solveFirstOrderGradient
}
self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
"""
Define scattering method for tomography
        model: scattering models; it can be one of the following:
               "MultiTrans", "MultiPhaseContrast" (used in the paper)
"""
self.scat_model = model
if hasattr(self, '_scattering_obj'):
del self._scattering_obj
if model == "MultiTrans":
self.phase_obj_3d.convertRItoTrans(self.wavelength)
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.trans_obj
            if any(angle != 0 for angle in self.rot_angles):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1, \
flag_gpu_inout = True, flag_inplace = True)
elif model == "MultiPhaseContrast":
if not hasattr(self.phase_obj_3d, 'contrast_obj'):
self.phase_obj_3d.convertRItoPhaseContrast()
self._x = self.phase_obj_3d.contrast_obj
            if any(angle != 0 for angle in self.rot_angles):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
else:
if not hasattr(self.phase_obj_3d, 'V_obj'):
self.phase_obj_3d.convertRItoV(self.wavelength)
self._x = self.phase_obj_3d.V_obj
            if any(angle != 0 for angle in self.rot_angles):
self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0, \
flag_gpu_inout = True, flag_inplace = True)
self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
def forwardPredict(self, field = False):
"""
Uses current object in the phase_obj_3d to predict the amplitude of the exit wave
        Before calling, make sure the correct object is set.
"""
obj_gpu = af.to_array(self._x)
with contexttimer.Timer() as timer:
forward_scattered_predict= []
if self._scattering_obj.back_scatter:
back_scattered_predict = []
for rot_idx in range(self.number_rot):
forward_scattered_predict.append([])
if self._scattering_obj.back_scatter:
back_scattered_predict.append([])
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
for illu_idx in range(self.number_illum):
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
if field:
forward_scattered_predict[rot_idx].append(np.array(fields["forward_scattered_field"]))
if self._scattering_obj.back_scatter:
back_scattered_predict[rot_idx].append(np.array(fields["back_scattered_field"]))
else:
forward_scattered_predict[rot_idx].append(np.abs(fields["forward_scattered_field"]))
if self._scattering_obj.back_scatter:
back_scattered_predict[rot_idx].append(np.abs(fields["back_scattered_field"]))
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, -1.0*self.rot_angles[rot_idx])
if len(forward_scattered_predict[0][0].shape)==2:
forward_scattered_predict = np.array(forward_scattered_predict).transpose(2, 3, 1, 0)
elif len(forward_scattered_predict[0][0].shape)==3:
forward_scattered_predict = np.array(forward_scattered_predict).transpose(2, 3, 4, 1, 0)
if self._scattering_obj.back_scatter:
if len(back_scattered_predict[0][0].shape)==2:
back_scattered_predict = np.array(back_scattered_predict).transpose(2, 3, 1, 0)
elif len(back_scattered_predict[0][0].shape)==3:
back_scattered_predict = np.array(back_scattered_predict).transpose(2, 3, 4, 1, 0)
return forward_scattered_predict, back_scattered_predict
else:
return forward_scattered_predict
def checkGradient(self, delta = 1e-4):
"""
        Check that the numerical gradient matches the analytical gradient. Only accurate with a 64-bit data type.
"""
assert af_float_datatype == af.Dtype.f64, "This will only be accurate if 64 bit datatype is used!"
shape = self.phase_obj_3d.shape
point = (np.random.randint(shape[0]), np.random.randint(shape[1]), np.random.randint(shape[2]))
illu_idx = np.random.randint(len(self.fx_illu_list))
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
x = np.ones(shape, dtype = np_complex_datatype)
if self._defocus_obj.pad:
amplitude = af.randu(shape[0]//2, shape[1]//2, dtype = af_float_datatype)
else:
amplitude = af.randu(shape[0], shape[1], dtype = af_float_datatype)
print("testing the gradient at point : ", point)
def func(x0):
fields = self._scattering_obj.forward(x0, fx_illu, fy_illu)
field_scattered = self._aberration_obj.forward(fields["forward_scattered_field"])
field_measure = self._defocus_obj.forward(field_scattered, self.prop_distances)
residual = af.abs(field_measure) - amplitude
function_value = af.sum(residual*af.conjg(residual)).real
return function_value
numerical_gradient = calculateNumericalGradient(func, x, point, delta = delta)
fields = self._scattering_obj.forward(x, fx_illu, fy_illu)
forward_scattered_field = fields["forward_scattered_field"]
cache = fields["cache"]
forward_scattered_field = self._aberration_obj.forward(forward_scattered_field)
field_measure = self._defocus_obj.forward(forward_scattered_field, self.prop_distances)
analytical_gradient = self._computeGradient(field_measure, amplitude, cache)[point]
print("numerical gradient: %5.5e + %5.5e j" %(numerical_gradient.real, numerical_gradient.imag))
print("analytical gradient: %5.5e + %5.5e j" %(analytical_gradient.real, analytical_gradient.imag))
def _forwardMeasure(self, fx_illu, fy_illu, obj = None):
"""
From an illumination angle, this function computes the exit wave.
fx_illu, fy_illu: illumination angle in x and y (scalars)
obj: object to be passed through (Optional, default pick from phase_obj_3d)
"""
if obj is None:
fields = self._scattering_obj.forward(self._x, fx_illu, fy_illu)
else:
fields = self._scattering_obj.forward(obj, fx_illu, fy_illu)
field_scattered = self._aberration_obj.forward(fields["forward_scattered_field"])
field_scattered = self._defocus_obj.forward(field_scattered, self.prop_distances)
fields["forward_scattered_field"] = field_scattered
if self._scattering_obj.back_scatter:
field_scattered = self._aberration_obj.forward(fields["back_scattered_field"])
field_scattered = self._defocus_obj.forward(field_scattered, self.prop_distances)
fields["back_scattered_field"] = field_scattered
return fields
def _computeGradient(self, field_measure, amplitude, cache):
"""
Error backpropagation to return a gradient
field_measure: exit wave computed in forward model
amplitude: amplitude measured
cache: exit wave at each layer, saved previously
"""
field_bp = field_measure - amplitude*af.exp(1.0j*af.arg(field_measure))
field_bp = self._defocus_obj.adjoint(field_bp, self.prop_distances)
field_bp = self._aberration_obj.adjoint(field_bp)
gradient = self._scattering_obj.adjoint(field_bp, cache)
return gradient["gradient"]
def _initialization(self,configs, x_init = None):
"""
Initialize algorithm
configs: configs object from class AlgorithmConfigs
x_init: initial guess of object
"""
if x_init is None:
            if self.scat_model == "MultiTrans":
self._x[:, :, :] = 1.0
else:
self._x[:, :, :] = 0.0
else:
self._x[:, :, :] = x_init
def _solveFirstOrderGradient(self, configs, amplitudes, verbose):
"""
MAIN part of the solver, runs the FISTA algorithm
configs: configs object from class AlgorithmConfigs
amplitudes: all measurements
verbose: boolean variable to print verbosely
"""
flag_FISTA = False
if configs.method == "FISTA":
flag_FISTA = True
# update multiple angles at a time
batch_update = False
if configs.fista_global_update or configs.batch_size != 1:
gradient_batch = af.constant(0.0, self.phase_obj_3d.shape[0],\
self.phase_obj_3d.shape[1],\
self.phase_obj_3d.shape[2], dtype = af_complex_datatype)
batch_update = True
if configs.fista_global_update:
configs.batch_size = 0
#TODO: what if num_batch is not an integer
if configs.batch_size == 0:
num_batch = 1
else:
if self.number_rot < 2:
num_batch = self.number_illum // configs.batch_size
else:
num_batch = self.number_rot // configs.batch_size
stepsize = configs.stepsize
max_iter = configs.max_iter
reg_term = configs.reg_term
configs.error = []
obj_gpu = af.constant(0.0, self.phase_obj_3d.shape[0],\
self.phase_obj_3d.shape[1],\
self.phase_obj_3d.shape[2], dtype = af_complex_datatype)
#Initialization for FISTA update
if flag_FISTA:
restart = configs.restart
y_k = self._x.copy()
t_k = 1.0
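            # FISTA bookkeeping (explanatory note): y_k is the extrapolated
            # iterate the gradient is evaluated at, and t_k is the momentum
            # scalar, updated as t_{k+1} = (1 + sqrt(1 + 4*t_k**2)) / 2,
            # which yields the method's O(1/k^2) convergence rate.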
#Start of iterative algorithm
with contexttimer.Timer() as timer:
if verbose:
print("---- Start of the %5s algorithm ----" %(self.scat_model))
for iteration in range(max_iter):
cost = 0.0
obj_gpu[:] = af.to_array(self._x)
if configs.random_order:
rot_order = np.random.permutation(range(self.number_rot))
illu_order = np.random.permutation(range(self.number_illum))
else:
rot_order = range(self.number_rot)
illu_order = range(self.number_illum)
for batch_idx in range(num_batch):
if batch_update:
gradient_batch[:,:,:] = 0.0
if configs.batch_size == 0:
rot_indices = rot_order
illu_indices = illu_order
else:
if self.number_rot < 2:
rot_indices = rot_order
illu_indices = illu_order[batch_idx * configs.batch_size : (batch_idx+1) * configs.batch_size]
else:
illu_indices = illu_order
rot_indices = rot_order[batch_idx * configs.batch_size : (batch_idx+1) * configs.batch_size]
for rot_idx in rot_indices:
# Rotate the object
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
if batch_update:
self._rot_obj.rotate(gradient_batch, self.rot_angles[rot_idx])
for illu_idx in illu_indices:
#forward scattering
fx_illu = self.fx_illu_list[illu_idx]
fy_illu = self.fy_illu_list[illu_idx]
fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
field_measure = fields["forward_scattered_field"]
cache = fields["cache"]
#calculate error
amplitude = af.to_array(amplitudes[:,:,:,illu_idx, rot_idx])
residual = af.abs(field_measure) - amplitude
cost += af.sum(residual*af.conjg(residual)).real
#calculate gradient
if batch_update:
gradient_batch[:, :, :] += self._computeGradient(field_measure, amplitude, cache)
else:
obj_gpu[:, :, :] -= stepsize * self._computeGradient(field_measure, amplitude, cache)
field_measure = None
cache = None
amplitude = None
if self.rot_angles[rot_idx] != 0:
self._rot_obj.rotate(obj_gpu, -1.0*self.rot_angles[rot_idx])
if batch_update:
self._rot_obj.rotate_adj(gradient_batch, self.rot_angles[rot_idx])
if batch_update:
obj_gpu[:, :, :] -= stepsize * gradient_batch
                    if np.isnan(obj_gpu)
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import warnings
from itertools import combinations_with_replacement as combinations_w_r
from distutils.version import LooseVersion
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import boxcox, nanpercentile, nanmedian
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
from ._encoders import OneHotEncoder
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'PowerTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
'power_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
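# Illustrative sketch (added here, not part of scikit-learn): standardizing a
# small dense array with the scale function above; each column ends up with
# zero mean and unit standard deviation.
def _example_scale():
    X = np.array([[1., 2.], [3., 6.], [5., 10.]])
    Xs = scale(X)
    print(Xs.mean(axis=0), Xs.std(axis=0))  # ~[0. 0.] and [1. 1.]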
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[0. 0. ]
[0.25 0.25]
[0.5 0.5 ]
[1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
data_min = np.nanmin(X, axis=0)
data_max = np.nanmax(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,
force_all_finite="allow-nan")
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES, force_all_finite='allow-nan')
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
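# Illustrative sketch (added here, not part of scikit-learn): unlike the
# MinMaxScaler estimator, the minmax_scale function accepts 1d input.
def _example_minmax_scale():
    x = np.array([1.0, 5.0, 9.0])
    print(minmax_scale(x))  # -> [0.  0.5 1. ]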
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray or None, shape (n_features,)
Per feature relative scaling of the data. Equal to ``None`` when
``with_std=False``.
.. versionadded:: 0.17
*scale_*
mean_ : ndarray or None, shape (n_features,)
The mean value for each feature in the training set.
Equal to ``None`` when ``with_mean=False``.
var_ : ndarray or None, shape (n_features,)
The variance for each feature in the training set. Used to compute
`scale_`. Equal to ``None`` when ``with_std=False``.
n_samples_seen_ : int or array, shape (n_features,)
The number of samples processed by the estimator for each feature.
        If there are no missing samples, the ``n_samples_seen`` will be an
integer, otherwise it will be an array.
Will be reset on new calls to fit, but increments across
``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, <NAME>., <NAME>, and <NAME>. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y
Ignored
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
# if n_samples_seen_ is an integer (i.e. no missing values), we need to
# transform it to a NumPy array of shape (n_features,) required by
# incr_mean_variance_axis and _incremental_variance_axis
if (hasattr(self, 'n_samples_seen_') and
isinstance(self.n_samples_seen_, (int, np.integer))):
self.n_samples_seen_ = np.repeat(self.n_samples_seen_,
X.shape[1]).astype(np.int64)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
sparse_constructor = (sparse.csr_matrix
if X.format == 'csr' else sparse.csc_matrix)
counts_nan = sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr),
shape=X.shape).sum(axis=0).A.ravel()
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = (X.shape[0] -
counts_nan).astype(np.int64)
if self.with_std:
# First pass
if not hasattr(self, 'scale_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
if hasattr(self, 'scale_'):
self.n_samples_seen_ += X.shape[0] - counts_nan
else:
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(X.shape[1], dtype=np.int64)
# First pass
if not hasattr(self, 'scale_'):
self.mean_ = .0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
        if np.ptp(self.n_samples_seen_)
from __future__ import division
import numpy, pandas
from scipy.signal import butter
from scipy import interpolate
import scipy
import csv
import click
import sys, os, re, pprint
from scipy.optimize import curve_fit
from scipy.fftpack import fft
from scipy.signal import butter, lfilter, find_peaks_cwt, detrend, periodogram, remez, iirfilter
from scipy.interpolate import CubicSpline, interp1d, UnivariateSpline
from src.utils import metadataExtractor, cxpPrinter
import collections
def gcamp_interpolate(gcamp, number_of_additional_timepoints):
gcamp_len = len(gcamp)
timelabels = range(0, gcamp_len)
cs = scipy.interpolate.CubicSpline(timelabels, gcamp)
timelabels_spline = numpy.arange(0, gcamp_len-1, 1/number_of_additional_timepoints)
gcamp_spline = cs(timelabels_spline)
return gcamp_spline
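# Quick sketch (added illustration, not in the original script): upsampling a
# 5-point trace by 4x with the cubic-spline helper above yields 16 samples,
# since the spline is evaluated on numpy.arange(0, 4, 1/4).
def _example_interpolation():
    spline = gcamp_interpolate([1.0, 2.0, 1.5, 3.0, 2.5], 4)
    print(len(spline))  # 16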
def gcamp_normalize(gcamp, gcamp_min, gcamp_max):
    # signal min already removed during extraction
return numpy.asarray(gcamp) / (gcamp_max - gcamp_min)
def gcamp_fwhm(gcamp, window_length, peak_ind, original_gcamp_length):
win_rise = peak_ind - window_length if peak_ind >= window_length else 0
win_fall = peak_ind + window_length + 1 if peak_ind < len(gcamp) - window_length else len(gcamp)
gcamp_windowed = gcamp[win_rise:win_fall] # look for a minimum within the window
# argrelextrema requires an *order* less than or equal to half the length of the input array
if window_length > len(gcamp_windowed) / 2:
min_ind = scipy.signal.argrelextrema(gcamp_windowed, numpy.less,
order=numpy.floor(len(gcamp_windowed) / 2).astype(int))
else:
min_ind = scipy.signal.argrelextrema(gcamp_windowed, numpy.less, order=window_length)
if len(min_ind[0]) == 0:
min_ind = numpy.where(gcamp_windowed == numpy.min(gcamp_windowed))
fwhm_cutoff = (gcamp[peak_ind] - numpy.min(gcamp_windowed[min_ind])) / 2 + numpy.min(gcamp_windowed[min_ind])
    window_length_expanded = window_length * 2  # after determining a cutoff, expand the search in case of asymmetry between rise and fall
    # a fold change of 2 implies the decay of a signal could take twice as long as the activation of length *window_length*
    # alternatively, the entire time-series could be searched; this might be better since processing costs for a signal of this length are negligible
win_rise_expanded = peak_ind - window_length_expanded if peak_ind >= window_length_expanded else 0
win_fall_expanded = peak_ind + window_length_expanded + 1 if peak_ind < len(
gcamp) - window_length_expanded else len(gcamp)
gcamp_windowed_expanded = gcamp[win_rise_expanded:win_fall_expanded]
peak_ind_expanded = peak_ind - win_rise_expanded
# There are special cases when the signal in the window does not reach the *fwhm_cutoff*.
# When this happens the fwhm will just use the ends of the window.
# The first point past the cutoff is chosen by numpy.min() and numpy.max().
    # To choose the closest index, the first point just before the closest index must also be considered.
fwhm_rise_ind = numpy.where(gcamp_windowed_expanded[:peak_ind_expanded] < fwhm_cutoff)
if len(fwhm_rise_ind[0]) == 0:
fwhm_rise = peak_ind - win_rise_expanded
else:
fwhm_riseA = numpy.asscalar(peak_ind_expanded - numpy.max(fwhm_rise_ind))
fwhm_rise_testA = abs(gcamp_windowed_expanded[peak_ind_expanded - fwhm_riseA] - fwhm_cutoff)
fwhm_rise_testB = abs(gcamp_windowed_expanded[peak_ind_expanded - fwhm_riseA + 1] - fwhm_cutoff)
fwhm_rise = fwhm_riseA if fwhm_rise_testA <= fwhm_rise_testB else fwhm_riseA - 1
fwhm_fall_ind = numpy.where(gcamp_windowed_expanded[peak_ind_expanded:] < fwhm_cutoff)
if len(fwhm_fall_ind[0]) == 0:
fwhm_fall = win_fall_expanded - peak_ind - 1 # the *-1* is to correct for an offset
else:
fwhm_fallA = numpy.asscalar(numpy.min(fwhm_fall_ind))
fwhm_fall_testA = abs(gcamp_windowed_expanded[fwhm_fallA + peak_ind_expanded] - fwhm_cutoff)
fwhm_fall_testB = abs(gcamp_windowed_expanded[fwhm_fallA + peak_ind_expanded - 1] - fwhm_cutoff)
fwhm_fall = fwhm_fallA if fwhm_fall_testA <= fwhm_fall_testB else fwhm_fallA - 1
# fwhm_rise and fwhm_fall should be greater than zero
fwhm_rise = 1 if fwhm_rise == 0 else fwhm_rise
fwhm_fall = 1 if fwhm_fall == 0 else fwhm_fall
# peak width
peak_start_ind = (peak_ind - fwhm_rise) if (peak_ind - fwhm_rise) > 0 else 0
peak_end_ind = (peak_ind + fwhm_fall) if (peak_ind + fwhm_fall) < len(gcamp) else len(gcamp)-1
peak_width = peak_end_ind - peak_start_ind # same as fwhm_rise + fwhm_fall
# area under the curve (area under the peak only)
area_under_curve = numpy.trapz(gcamp[peak_start_ind:peak_end_ind+1], dx=original_gcamp_length/len(gcamp))
return fwhm_rise, fwhm_fall, fwhm_cutoff, peak_width, area_under_curve
# To find in array the element closest to value
def find_nearest(array,value,startIdx,endIdx):
if endIdx < len(array)-1:
endIdx = endIdx+1
idx = (numpy.abs(array[startIdx:endIdx]-value)).argmin() + startIdx
return idx
# - To obtain half maximum points, peak start/end, height
# - Half max data not used currently, this method also returns other important
# metrics such as peak height, etc.
def getPeakDefiningPoints(signal, peaks, valleys, wellmin):
half_maximums, peak_halfmax_starts, peak_halfmax_ends = [],[],[] # halfmax values (halfmax,halfmax start, halfmax end)
peak_rise_starts, peak_fall_ends= [],[]
peak_heights_localmin, peak_heights_signalmin, peak_heights_wellmin = [],[],[]
for idx,peak in enumerate(peaks):
# Step 1: Get valleys between previous and current peak
if len(peaks) > 1 and idx > 0:
valleys_considered = valleys[(valleys > peaks[idx - 1]) & (valleys < peak)]
else:
valleys_considered = valleys[(valleys < peak)]
# Step 2: Determine peak start index
if len(valleys_considered) > 0:
peak_start = valleys_considered[-1] # 1st valley to the left of current peak
else:
peak_start = 0
peak_rise_starts.append(peak_start)
# Step 3: Determine peak end idx
if idx <= len(peaks) - 2: # if there is at least 1 more peak in peaks
# valleys between current and next peak
nextValleys = valleys[(valleys > peak) & (valleys < peaks[idx + 1])]
else:
# valleys between current peak and end of signal
nextValleys = valleys[(valleys > peak) & (valleys < (len(signal)-1))]
# take 1st valley to the right of current peak
if len(nextValleys) > 0:
peak_end = nextValleys[0]
else:
peak_end = len(signal) - 1
peak_fall_ends.append(peak_end)
# Step 4: Compute halfmax and approximate corresponding halfmax start/end index
halfmax = (max(signal[peak] - signal[peak_start], signal[peak] - signal[peak_end]))/2.0 + signal[peak_start]
half_maximums.append(halfmax)
halfmax_start = find_nearest(signal, halfmax, peak_start, peak)
peak_halfmax_starts.append(halfmax_start)
peak_halfmax_ends.append(find_nearest(signal, signal[halfmax_start], peak, peak_end))
# Step 5: Compute peak height
# Method 1: Difference between gcamp signal and minimum value of that same gcamp signal.
peakheight_signalmin = signal[peak] - min(signal)
peak_heights_signalmin.append(peakheight_signalmin)
# Method 2: Difference between gcamp signal and local minimum of the peak under analysis.
peakheight_localmin = max(signal[peak] - signal[peak_start], signal[peak] - signal[peak_end])
peak_heights_localmin.append(peakheight_localmin)
# Method 3: Difference between gcamp signal and minimum gcamp value (avg background intensity of well)
        # This difference corresponds to the height of the signal itself, as it is already corrected for background intensity.
peakheight_wellmin = signal[peak]
peak_heights_wellmin.append(peakheight_wellmin)
return half_maximums, peak_halfmax_starts, peak_halfmax_ends, peak_rise_starts, peak_fall_ends, peak_heights_signalmin, peak_heights_localmin, peak_heights_wellmin
def wavelet_peak(gcamp, max_scale, min_length_0, min_snr_0, noise_perc_0):
widths = numpy.arange(1,max_scale,1)
peakind = find_peaks_cwt(detrend(gcamp), widths, max_distances=widths/2, gap_thresh=3, min_length=min_length_0, min_snr=min_snr_0, noise_perc=noise_perc_0)
if len(peakind) == 0:
peakind = [0]
return peakind
"""
x: signal
min_peak_height: anything smaller than that will be rejected
edge: {'rising','falling','both'} --> determine which indices to keep for irregular peaks, plateaus, etc.
valley: if true, will returns indices of valleys instead of peaks
min_rel_height_neighbor: specifies a minimum relative height difference between peaks and their immediate neighbors
min_peak_distance: minimum distance that must separate each peak for them to be valid
keep_peaks_same_height: keep peaks of same height even if closer than min_peak_distance
Returns indices of identified peaks
"""
def find_peaks(x, min_peak_height=None, edge='rising', valley=False, min_rel_height_neighbor=0, min_peak_distance=1,
keep_peaks_same_height=False):
# need at least 3 points to identify valid peaks
if x.size < 3:
return numpy.array([], dtype=int)
# if looking for valleys, invert the signal and look for peaks
if valley:
x = -x
# identify the different types of peaks
dx = numpy.diff(x)
singlePointPeaks, risingEdgePeaks, fallingEdgePeaks = numpy.array([[], [], []], dtype=int)
if not edge:
singlePointPeaks = numpy.where((numpy.hstack((dx, 0)) < 0) & (numpy.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
risingEdgePeaks = numpy.where((numpy.hstack((dx, 0)) <= 0) & (numpy.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
fallingEdgePeaks = numpy.where((numpy.hstack((dx, 0)) < 0) & (numpy.hstack((0, dx)) >= 0))[0]
ind = numpy.unique(numpy.hstack((singlePointPeaks, risingEdgePeaks, fallingEdgePeaks)))
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# keep only peaks > minimum peak height
if ind.size and min_peak_height is not None:
ind = ind[x[ind] >= min_peak_height]
# remove peaks that are less than "neighbor_threshold" higher than their neighbors
if ind.size and min_rel_height_neighbor > 0:
dx_neighbors = numpy.min(numpy.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
ind = numpy.delete(ind, numpy.where(dx_neighbors < min_rel_height_neighbor)[0])
# identify peaks closer to one another than min_peak_distance
if ind.size and min_peak_distance > 1:
ind = ind[numpy.argsort(x[ind])][::-1] # sort ind by peak height
idel = numpy.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
                # keep peaks with the same height if keep_peaks_same_height is True
idel = idel | (ind >= ind[i] - min_peak_distance) & (ind <= ind[i] + min_peak_distance) \
& (x[ind[i]] > x[ind] if keep_peaks_same_height else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indexes by their occurrence
ind = numpy.sort(ind[~idel])
return ind
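# Hedged example (added illustration, not in the original): detecting the two
# local maxima of a toy trace with the detector above, rejecting bumps below
# min_peak_height.
def _example_find_peaks():
    x = numpy.array([0., 1., 0.2, 0.3, 2.0, 0.1, 0.05])
    print(find_peaks(x, min_peak_height=0.5))  # -> [1 4]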
# Returns wavelet analysis and periodogram stored in ordered dictionary
def wavelet_periodogram_extraction(gcamp, original_gcamp_length):
d = collections.OrderedDict()
# correction factor to account for interpolation
correction_factor = original_gcamp_length / len(gcamp)
# Wavelet 8 (8 is better as a general peak identifier)
window_length_wavelet8 = 15
peak_ind_wavelet8 = wavelet_peak(gcamp, 8, 5, 2, 10)
# peak_ind_wavelet8 = [i for i in peak_ind_wavelet8 if gcamp[i] >= threshold]
if len(peak_ind_wavelet8) == 0 or (len(peak_ind_wavelet8) == 1 and peak_ind_wavelet8[0] == 0):
d["wavelet8_peak_count"] = 0
d["wavelet8_firing_rate"] = 0
else:
# full-width half-maximum computations
fwhm_wavelet8 = [gcamp_fwhm(gcamp, window_length_wavelet8, pk, original_gcamp_length) for pk in
peak_ind_wavelet8]
fwhm_wavelet8_arr = numpy.asarray(fwhm_wavelet8)
fwhm_wavelet8_average = numpy.average(fwhm_wavelet8_arr, 0)
fwhm_wavelet8_sum = numpy.sum(fwhm_wavelet8_arr, 0) # used for total AUC
# add features to dictionary
d["wavelet8_peak_count"] = len(peak_ind_wavelet8)
d["wavelet8_firing_rate"] = len(peak_ind_wavelet8) / original_gcamp_length
d["wavelet8_amplitude"] = numpy.mean(gcamp[peak_ind_wavelet8])
d["wavelet8_fwhm_rise_time"] = fwhm_wavelet8_average[0] * correction_factor
d["wavelet8_fwhm_fall_time"] = fwhm_wavelet8_average[1] * correction_factor
d["wavelet8_fwhm_cutoff"] = fwhm_wavelet8_average[2]
d["wavelet8_fwhm_peak_width"] = fwhm_wavelet8_average[3] * correction_factor
d["wavelet8_fwhm_area_under_curve"] = fwhm_wavelet8_average[4]
d["wavelet8_fwhm_total_area_under_curve"] = fwhm_wavelet8_sum[4]
d["wavelet8_fwhm_rise_fall_ratio_mean"] = numpy.mean(
numpy.divide(fwhm_wavelet8_arr[:, 0], fwhm_wavelet8_arr[:, 1]))
if len(peak_ind_wavelet8) > 1:
d["wavelet8_spike_interval_mean"] = numpy.mean(numpy.diff(peak_ind_wavelet8) * correction_factor)
d["wavelet8_spike_interval_var"] = numpy.var(numpy.diff(peak_ind_wavelet8) * correction_factor)
# Wavelet 4 (4 is good for identifying peaks of smaller amplitude)
window_length_wavelet4 = 9
peak_ind_wavelet4 = wavelet_peak(gcamp, 4, 3, 1, 10)
if len(peak_ind_wavelet4) == 0 or (len(peak_ind_wavelet4) == 1 and peak_ind_wavelet4[0] == 0):
d["wavelet4_peak_count"] = 0
d["wavelet4_firing_rate"] = 0
else:
# full-width half-maximum computations
fwhm_wavelet4 = [gcamp_fwhm(gcamp, window_length_wavelet4, pk, original_gcamp_length) for pk in
peak_ind_wavelet4]
fwhm_wavelet4_arr = numpy.asarray(fwhm_wavelet4)
fwhm_wavelet4_average = numpy.average(fwhm_wavelet4_arr, 0)
fwhm_wavelet4_sum = numpy.sum(fwhm_wavelet4_arr, 0) # used for total AUC
# add features to dictionary
d["wavelet4_peak_count"] = len(peak_ind_wavelet4)
d["wavelet4_firing_rate"] = len(peak_ind_wavelet4) / original_gcamp_length
d["wavelet4_amplitude"] = numpy.mean(gcamp[peak_ind_wavelet4])
d["wavelet4_fwhm_rise_time"] = fwhm_wavelet4_average[0] * correction_factor
d["wavelet4_fwhm_fall_time"] = fwhm_wavelet4_average[1] * correction_factor
d["wavelet4_fwhm_cutoff"] = fwhm_wavelet4_average[2]
d["wavelet4_fwhm_peak_width"] = fwhm_wavelet4_average[3] * correction_factor
d["wavelet4_fwhm_area_under_curve"] = fwhm_wavelet4_average[4]
d["wavelet4_fwhm_total_area_under_curve"] = fwhm_wavelet4_sum[4]
d["wavelet4_fwhm_rise_fall_ratio_mean"] = numpy.mean(
numpy.divide(fwhm_wavelet4_arr[:, 0], fwhm_wavelet4_arr[:, 1]))
if len(peak_ind_wavelet4) > 1:
d["wavelet4_spike_interval_mean"] = numpy.mean(numpy.diff(peak_ind_wavelet4) * correction_factor)
d["wavelet4_spike_interval_var"] = numpy.var(numpy.diff(peak_ind_wavelet4) * correction_factor)
# Periodogram (Fourier Fast Transform, Power Spectral Density)
# For a typical 45-timepoint series, we expect 89 distinct frequencies along with their weights (aka power)
# The number of distinct frequency components can be computed using the following formula:
# numFreq = len(gcamp)/2 + 1
# If gcamp is non interpolated, then --> numFreq = ((len(gcamp)-1)*num_additional_points)/2 + 1
f, Pxx_den = scipy.signal.periodogram(scipy.signal.detrend(gcamp))
for index, power in enumerate(Pxx_den):
key = "periodogram_{0}".format(index)
d[key] = power
return d
# Gets gcamp features (processes a single object, i.e. a gcamp time series)
def gcamp_feature_extraction(gcamp,well,obj_number, thresholds, original_gcamp_length, platename, well_types_dict, wellmins):
# retrieve well type
well_type = 'unspecified' # default well type
for key in well_types_dict.keys():
if well in well_types_dict[key]:
well_type = key
break
# instantiate ordered dictionary
d = collections.OrderedDict()
d["plate"] = platename
d["well"] = well
d["well_type"] = well_type
d["obj_number"] = obj_number
# signal stats
d["signal_mean"] = numpy.mean(gcamp)
d["signal_var"] = | numpy.var(gcamp) | numpy.var |
"""Generic protoplanetary disk model
The density is given by
.. math::
\\rho = \\frac{\\Sigma(r,\\phi)}{H_p\\sqrt{(2\\pi)}} \\exp{\\left(-\\frac{z^2}{2H_p^2}\\right)}
* :math:`\Sigma` - surface density
* :math:`H_{\\rm p}` - Pressure scale height
There are two options for the functional form of surface density as a function of radius. For a simple
power-law the surface density is given by
* :math:`\Sigma(r) = \\Sigma_0\\left(\\frac{r}{r_{\\rm out}}\\right)^p`
alternatively the surface density can also have an exponential outer tapering:
* :math:`\Sigma(r) = \\Sigma_0\\left(\\frac{r}{r_{\\rm out}}\\right)^p\\exp{\\left\\{-\\left(\\frac{r}{r_{\\rm out}}\\right)^{2+p}\\right\\}}`
The molecular abundance function takes into account dissociation and freeze-out of the molecules
For photodissociation only the continuum (dust) shielding is taken into account in a way that
whenever the continuum optical depth radially drops below a threshold value the molecular abundance
is dropped to zero. For freeze-out the molecular abundance below a threshold temperature is decreased
    by a given factor.
"""
from __future__ import absolute_import
from __future__ import print_function
import warnings
import traceback
try:
import numpy as np
except ImportError:
np = None
print(' Numpy cannot be imported ')
print(' To use the python module of RADMC-3D you need to install Numpy')
print(traceback.format_exc())
from .. natconst import *
from .. import analyze
def getModelDesc():
"""Returns the brief description of the model.
"""
return "Generic protoplanetary disk model"
def getDefaultParams():
"""Function to provide default parameter values of the model.
Returns a list whose elements are also lists with three elements:
1) parameter name, 2) parameter value, 3) parameter description
All three elements should be strings. The string of the parameter
value will be directly written out to the parameter file if requested,
and the value of the string expression will be evaluated and be put
to radmc3dData.ppar. The third element contains the description of the
parameter which will be written in the comment field of the line when
a parameter file is written.
"""
defpar = [
['xres_nlev', '3', 'Number of refinement levels'],
['xres_nspan', '3', 'Number of the original grid cells to refine'],
['xres_nstep', '3', 'Number of grid cells to create in a refinement level'],
        ['nx', '[30,50]', 'Number of grid points in the first dimension'],
        ['xbound', '[1.0*au,1.05*au, 100.0*au]', 'Boundaries of the radial grid'],
        ['ny', '[10,30,30,10]', 'Number of grid points in the second dimension'],
        ['ybound', '[0., pi/3., pi/2., 2.*pi/3., pi]', 'Boundaries of the poloidal grid'],
        ['nz', '30', 'Number of grid points in the third dimension'],
        ['zbound', '[0., 2.0*pi]', 'Boundaries of the azimuthal grid'],
['gasspec_mol_name', "['co']", ''],
['gasspec_mol_abun', '[1e-4]', ''],
['gasspec_mol_dbase_type', "['leiden']", ''],
['gasspec_mol_dissoc_taulim', '[1.0]', 'Continuum optical depth limit below which all molecules dissociate'],
['gasspec_mol_freezeout_temp', '[19.0]', 'Freeze-out temperature of the molecules in Kelvin'],
        ['gasspec_mol_freezeout_dfact', '[1e-3]',
         'Factor by which the molecular abundance should be decreased in the freeze-out zone'],
['gasspec_vturb', '0.2e5', 'Microturbulent line width'],
['rin', '1.0*au', ' Inner radius of the disk'],
['rdisk', '100.0*au', ' Outer radius of the disk'],
['hrdisk', '0.1', ' Ratio of the pressure scale height over radius at hrpivot'],
['hrpivot', "100.0*au", ' Reference radius at which Hp/R is taken'],
['plh', '1./7.', ' Flaring index'],
['plsig1', '-1.0', ' Power exponent of the surface density distribution as a function of radius'],
['sig0', '0.0', ' Surface density at rdisk'],
['mdisk', '1e-3*ms', ' Mass of the disk (either sig0 or mdisk should be set to zero or commented out)'],
['bgdens', '1e-30', ' Background density (g/cm^3)'],
['srim_rout', '0.0', 'Outer boundary of the smoothing in the inner rim in terms of rin'],
['srim_plsig', '0.0', 'Power exponent of the density reduction inside of srim_rout*rin'],
['prim_rout', '0.0', 'Outer boundary of the puffed-up inner rim in terms of rin'],
['hpr_prim_rout', '0.0', 'Pressure scale height at rin'],
['gap_rin', '[0e0*au]', ' Inner radius of the gap'],
['gap_rout', '[0e0*au]', ' Outer radius of the gap'],
['gap_drfact', '[0e0]', ' Density reduction factor in the gap'],
['sigma_type', '0',
' Surface density type (0 - polynomial, 1 - exponential outer edge (viscous self-similar solution)'],
['dusttogas', '0.01', ' Dust-to-gas mass ratio']]
return defpar
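# Illustrative sketch (not part of the model code): the two surface density
# options described in the module docstring, with sig0, rdisk and plsig1
# standing in for the corresponding ppar entries.
#
#   def _surface_density(r, sig0, rdisk, p, sigma_type=0):
#       sigma = sig0 * (r / rdisk)**p
#       if sigma_type == 1:  # exponentially tapered outer edge
#           sigma = sigma * np.exp(-(r / rdisk)**(2.0 + p))
#       return sigma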
def getDustDensity(grid=None, ppar=None):
"""Calculates the dust density distribution in a protoplanetary disk.
Parameters
----------
grid : radmc3dGrid
An instance of the radmc3dGrid class containing the spatial and frequency/wavelength grid
ppar : dictionary
A dictionary containing all parameters of the model
Returns
-------
Returns the volume density in g/cm^3
"""
# Get the gas density
rhogas = getGasDensity(grid=grid, ppar=ppar)
rho = np.array(rhogas) * ppar['dusttogas']
# Split up the disk density distribution according to the given abundances
if 'ngs' in ppar:
if ppar['ngs'] > 1:
ngs = ppar['ngs']
#
# WARNING!!!!!!
# At the moment I assume that the multiple dust population differ from each other only in
# grain size but not in bulk density thus when I calculate the abundances / mass fractions
# they are independent of the grains bulk density since abundances/mass fractions are normalized
# to the total mass. Thus I use 1g/cm^3 for all grain sizes.
# TODO: Add the possibility to handle multiple dust species with different bulk densities and
# with multiple grain sizes.
#
gdens = np.zeros(ngs, dtype=float) + 1.0
gs = ppar['gsmin'] * (ppar['gsmax']/ppar['gsmin'])**(np.arange(ppar['ngs'], dtype=np.float64)
/ (float(ppar['ngs'])-1.))
gmass = 4./3.*np.pi*gs**3. * gdens
gsfact = gmass * gs**(ppar['gsdist_powex']+1)
gsfact = gsfact / gsfact.sum()
else:
gsfact = [1.0]
ngs = 1
elif 'mfrac' in ppar:
ngs = len(ppar['mfrac'])
gsfact = ppar['mfrac'] / ppar['mfrac'].sum()
else:
ngs = 1
gsfact = [1.0]
rho_old = np.array(rho)
rho = np.zeros([grid.nx, grid.ny, grid.nz, ngs], dtype=np.float64)
for igs in range(ngs):
rho[:, :, :, igs] = rho_old[:, :, :] * gsfact[igs]
return rho
def getGasDensity(grid=None, ppar=None):
"""Calculates the gas density distribution in a protoplanetary disk.
Parameters
----------
grid : radmc3dGrid
An instance of the radmc3dGrid class containing the spatial and frequency/wavelength grid
ppar : dictionary
A dictionary containing all parameters of the model
Returns
-------
Returns the volume density in g/cm^3
"""
rr, th = np.meshgrid(grid.x, grid.y)
zz = rr * np.cos(th)
rcyl = rr * np.sin(th)
# Calculate the pressure scale height as a function of r, phi
hp = np.zeros([grid.nx, grid.ny, grid.nz], dtype=np.float64)
dum = ppar['hrdisk'] * (rcyl/ppar['hrpivot'])**ppar['plh'] * rcyl
if 'prim_rout' in ppar:
if ppar['prim_rout'] >= 1.:
dum_hrdisk = ppar['hrdisk'] * (rcyl/ppar['hrpivot'])**ppar['plh']
hpr0 = ppar['hrdisk'] * (ppar['prim_rout'] * ppar['rin']/ppar['hrpivot'])**ppar['plh']
            dummy = np.log10(hpr0 / ppar['hpr_prim_rout'])
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython import display
# Implemented methods
methods = ['DynProg', 'ValIter'];
# Some colours
LIGHT_RED = '#FFC4CC';
LIGHT_GREEN = '#95FD99';
BLACK = '#000000';
WHITE = '#FFFFFF';
LIGHT_PURPLE = '#E8D0FF';
LIGHT_ORANGE = '#FAE0C3';
SEB_GREEN = '#52B92C';
BUSTED_BLUE = '#5993B5'
class RobbingBanks:
# Actions
STAY = 0
MOVE_LEFT = 1
MOVE_RIGHT = 2
MOVE_UP = 3
MOVE_DOWN = 4
# Give names to actions
actions_names = {
STAY: "stay",
MOVE_LEFT: "move left",
MOVE_RIGHT: "move right",
MOVE_UP: "move up",
MOVE_DOWN: "move down"
}
# Reward values
def __init__(self, town_map):
""" Constructor of the environment town_map.
"""
self.STEP_REWARD = 0
self.BANK_REWARD = 10
self.CAUGHT_REWARD = -50
self.town_map = town_map;
self.initial_state = np.array([0,0,1,2])
self.actions = self.__actions();
self.states, self.map = self.__states();
self.n_actions = len(self.actions);
self.n_states = len(self.states);
self.transition_probabilities = self.__transitions();
self.rewards = self.__rewards();
def __actions(self):
actions = dict();
actions[self.STAY] = np.array([0, 0]);
actions[self.MOVE_LEFT] = np.array([0,-1]);
        actions[self.MOVE_RIGHT] = np.array([0, 1]);
        # Remaining entries completed from actions_names above, following the
        # (row, column) convention implied by MOVE_LEFT/MOVE_RIGHT:
        actions[self.MOVE_UP]    = np.array([-1, 0]);
        actions[self.MOVE_DOWN]  = np.array([1, 0]);
        return actions;
import json
import numpy as np
import tensorflow as tf
from utils.load_data import load_data
from utils.extraction_model import load_model
def find_face_units(model, data, verbose=False):
"""
    Implementation of the method "Face-selective population estimation" from the paper:
    "Convolutional neural networks explain tuning properties of anterior, but not middle, face-processing areas in
    macaque inferotemporal cortex"
    https://www.nature.com/articles/s42003-020-0945-x
:param model:
:param config:
:return:
"""
print("shape data", np.shape(data))
x_face = data[:50]
x_object = data[50:]
FSI_list = []
for layer in model.layers:
if "conv" in layer.name:
if verbose:
print("layer:", layer.name)
# cut model
m = tf.keras.Model(inputs=model.input, outputs=layer.output)
# predict face and non_face outputs
preds_face = m.predict(x_face)
preds_object = m.predict(x_object)
# flatten array
preds_face = np.reshape(preds_face, (np.shape(preds_face)[0], -1))
preds_object = np.reshape(preds_object, (np.shape(preds_object)[0], -1))
            n_features = np.shape(preds_face)
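            # Loop body truncated in this dump; a minimal sketch of the
            # per-unit face-selectivity index, assuming the standard
            # definition from the cited paper:
            #   mu_f = np.mean(preds_face, axis=0)
            #   mu_o = np.mean(preds_object, axis=0)
            #   fsi = (mu_f - mu_o) / (mu_f + mu_o + 1e-12)
            #   FSI_list.append(fsi)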
"""
Script that splits the dataset by ratio
"""
import shutil
import os
import numpy as np
import argparse
def get_files_from_folder(path):
files = os.listdir(path)
return np.asarray(files)
def main(path_to_data, path_to_test_data, train_ratio):
# get dirs
_, dirs, _ = next(os.walk(path_to_data))
# calculates how many train data per class
data_counter_per_class = np.zeros((len(dirs)))
for i in range(len(dirs)):
path = os.path.join(path_to_data, dirs[i])
files = get_files_from_folder(path)
data_counter_per_class[i] = len(files)
    test_counter = np.round(data_counter_per_class * (1 - train_ratio))
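    # main() is truncated here; a minimal sketch of the usual continuation,
    # assuming the last test_counter[i] files of each class are moved into a
    # mirrored directory under path_to_test_data:
    #   for i, d in enumerate(dirs):
    #       files = get_files_from_folder(os.path.join(path_to_data, d))
    #       dst = os.path.join(path_to_test_data, d)
    #       os.makedirs(dst, exist_ok=True)
    #       n_test = int(test_counter[i])
    #       for f in files[len(files) - n_test:]:
    #           shutil.move(os.path.join(path_to_data, d, f), os.path.join(dst, f))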
from config import *
import numpy as np
import networkx as nx
import scipy.stats
import glob, os
def read_G(sub,ds,corr):
""" read the correlation graph for a subject with a given denoising strategy
"""
files = glob.glob(rootdir + "/data/04_correlations/corr-%s/ds-%s/*%s*.gexf"
%(corr,ds,sub))
return nx.read_gexf(files[0])
def read_M(sub,ds,corr):
""" read the correlation matrix for a subject with a given denoising strategy
"""
filesnp = glob.glob(rootdir + "/data/04_correlations/corr-%s/ds-%s/*%s*.npy"
%(corr,ds,sub))
    return np.load(filesnp[0])
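# Example usage (illustrative; the subject/strategy names are hypothetical):
#   G = read_G("sub-01", "ds1", "pearson")   # networkx correlation graph
#   M = read_M("sub-01", "ds1", "pearson")   # numpy correlation matrix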
import numpy as np
import cv2
from skimage.exposure import rescale_intensity as rescale_int
import matplotlib.cm as cm
import SimpleITK as sitk
"""Different image utils."""
__author__ = '<NAME>'
def get_pad_crop_bound(size):
"""Gets the padding or cropping bounds using the max of the input (padding) or output (cropping) size."""
max_size = np.max(size)
lb = []
ub = []
for sz in size:
diff = int(max_size - sz)
        lb.append(diff // 2)           # integer division: SimpleITK expects integer bounds
        ub.append(diff - diff // 2)
return lb, ub
def pad_resize(image, output_size, hw_only=False, interpolator=sitk.sitkNearestNeighbor, make_copy=True):
"""Resizes an image and pads it to a square or cubic image if required.
:param image: numpy array (hw, dhw, hwc, or dhwc) or SimpleITK Image (xy or xyz).
:param output_size: output size, (height, width) or (depth, height, width).
:param hw_only: if True, only zero-pad the height and width. 3D image only.
:param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.
:param make_copy: if True, a copy of the image is returned if there is no modification.
:return: Resized image.
"""
if len(output_size) == 2:
hw_only = False
original_image = image
# Remember image type for numpy array
image_type = None
if not isinstance(image, sitk.Image):
image_type = image.dtype
# Get input size in xy or xyz
if isinstance(image, sitk.Image):
input_size = image.GetSize()
else:
input_size = image.shape[::-1]
if image.ndim == len(output_size) + 1:
input_size = input_size[1:]
# Get output size in xy or xyz
output_size = output_size[::-1]
# Pad square or cube if necessary
# Same size for all directions (2D, 3D)
if np.unique(output_size).size == 1 and np.unique(input_size).size != 1 and not hw_only:
lb, ub = get_pad_crop_bound(input_size)
image = pad_or_crop(sitk.ConstantPad, image, lb, ub)
# Same size for height and width only (3D)
elif np.unique(output_size[:2]).size == 1 and np.unique(input_size[:2]).size != 1:
lb, ub = get_pad_crop_bound(input_size[:2])
image = pad_or_crop(sitk.ConstantPad, image, lb + [0], ub + [0])
# Resize
if not all(input_size[i] == output_size[i] for i in range(len(output_size))):
image = resize(image=image, output_size=output_size[::-1], interpolator=interpolator)
# Restore type
if image_type is not None:
image = get_array(image)
image = np.asarray(image, dtype=image_type)
# Make a copy if no change
if original_image is image and make_copy:
if isinstance(image, sitk.Image):
image = sitk.Cast(image, image.GetPixelID())
else:
image = image.copy()
return image
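# Example usage (illustrative): pad a rectangular 2D array to a square and
# then resize, so the aspect ratio is preserved by zero-padding instead of
# stretching:
#   img = np.zeros((100, 80), dtype=np.uint8)
#   out = pad_resize(img, (64, 64))  # padded to 100x100, then resized to 64x64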
def pad_or_crop(ops, image, lb, ub):
"""Pads or crops an image.
:param ops: operation, sitk.ConstantPad or sitk.Crop.
:param image: input image. Can be numpy array or SimpleITK Image.
:param lb: padding lower bound.
:param ub: padding upper bound.
:return: padded image (SimpleITK Image).
"""
# Single-channel
if (isinstance(image, sitk.Image) and image.GetNumberOfComponentsPerPixel() == 1) or image.ndim == len(lb):
image = get_sitk_image(image)
image = ops(image, lb, ub)
# Multi-channel
else:
image = get_array(image)
image = np.moveaxis(image, -1, 0)
image_channels = [ops(get_sitk_image(img), lb, ub) for img in image]
image = sitk.Compose(image_channels)
return image
def reverse_pad_resize(image, output_size, hw_only=False, interpolator=sitk.sitkNearestNeighbor):
"""Reverses the process of pad_resize and returns the original sized image. This function is useful for resizing
the CNN output mask to fit the original image.
:param image: numpy array (hw, dhw, hwc or dhwc) or SimpleITK Image (xy or xyz).
:param output_size: output size, (height, width) or (depth, height, width), usually the shape of the original
image before pad_resize.
:param hw_only: if True, only crop the height and width. Needs to be consistent with pad_resize.
:param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.
:return the resized image with output_size.
"""
if len(output_size) == 2:
hw_only = False
original_image = image
# Remember image type for numpy array
image_type = None
if not isinstance(image, sitk.Image):
image_type = image.dtype
# Get input size in xy or xyz
if isinstance(image, sitk.Image):
input_size = image.GetSize()
else:
input_size = image.shape[::-1]
if image.ndim == len(output_size) + 1:
input_size = input_size[1:]
# Get output size in xy or xyz
output_size = output_size[::-1]
# Resize and crop
# Same size for all directions (2D, 3D)
if np.unique(input_size).size == 1 and np.unique(output_size).size != 1 and not hw_only:
size = np.ones(len(output_size)) * np.max(output_size)
image = resize(image=image, output_size=size, interpolator=interpolator)
lb, ub = get_pad_crop_bound(output_size)
image = pad_or_crop(sitk.Crop, image, lb, ub)
# Same size for height and width only (3D)
elif np.unique(input_size[:2]).size == 1 and np.unique(output_size[:2]).size != 1:
xy = output_size[:2]
size = [output_size[2]] + list(np.ones(len(xy)) * np.max(xy)) # dhw
image = resize(image=image, output_size=size, interpolator=interpolator)
lb, ub = get_pad_crop_bound(xy)
image = pad_or_crop(sitk.Crop, image, lb + [0], ub + [0])
# Resize only
else:
image = resize(image=image, output_size=output_size[::-1], interpolator=interpolator)
# Restore type
if image_type is not None:
image = get_array(image)
image = np.asarray(image, dtype=image_type)
# Make a copy if no change
if original_image is image:
if isinstance(image, sitk.Image):
image = sitk.Cast(image, image.GetPixelID())
else:
image = image.copy()
return image
def resize_by_spacing(image, input_spacing=None, interpolator=sitk.sitkNearestNeighbor):
"""Resizes an image to isotropic spacing.
The smallest spacing is used. This is useful as the image may be abnormally deformed when spacing information is
discarded.
:param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).
:param input_spacing: input image spacing, (height, width) or (depth, height, width).
:param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.
:return: Resized image.
"""
if input_spacing is None:
if isinstance(image, sitk.Image):
input_spacing = image.GetSpacing()
else:
raise Exception('Input spacing must be provided for non-SimpleITK images.')
# The smallest spacing is used
output_spacing = np.ones(len(input_spacing)) * np.min(input_spacing)
return resize(image=image, output_spacing=output_spacing, interpolator=interpolator)
def resize(image, output_size=None, output_spacing=None, interpolator=sitk.sitkNearestNeighbor):
"""Resizes an image by the given output size and/or output spacing.
:param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).
:param output_size: output size, (height, width) or (depth, height, width).
:param output_spacing: output spacing, (height, width) or (depth, height, width).
:param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.
:return: Resized image.
"""
if output_size is None and output_spacing is None:
raise Exception('Both output_size and output_spacing are None.')
image_type = None
if not isinstance(image, sitk.Image):
# Check if vector image
if output_size is not None:
target_dim = len(output_size)
else:
target_dim = len(output_spacing)
isVector = False
if image.ndim == target_dim + 1:
isVector = True
image_type = image.dtype # Remember the original type which may be changed during operations
image = get_sitk_image(image, isVector=isVector)
input_spacing = np.asarray(image.GetSpacing())
input_size = np.asarray(image.GetSize())
physical_size = input_spacing * input_size
# Change to SimpleITK format, xy or xyz
if output_size is not None:
output_size = np.asarray(output_size)[::-1]
if output_spacing is not None:
output_spacing = np.asarray(output_spacing)[::-1]
# Compute missing arguments assuming same physical size
if output_spacing is None:
output_spacing = physical_size / output_size
elif output_size is None:
output_size = physical_size / output_spacing
resample = sitk.ResampleImageFilter()
resample.SetInterpolator(interpolator)
resample.SetSize(np.asarray(output_size, np.int))
resample.SetOutputSpacing(output_spacing)
resample.SetOutputOrigin(image.GetOrigin())
image = resample.Execute(image)
if image_type is not None:
image = get_array(image)
image = np.asarray(image, dtype=image_type)
return image
def get_sitk_image(image, isVector=False):
"""Converts to a SimpleITK Image if necessary.
:param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).
:return: SimpleITK Image.
"""
if not isinstance(image, sitk.Image):
image = sitk.GetImageFromArray(image, isVector=isVector) # Transpose is taken care by SimpleITK
return image
def get_array(image):
"""Converts to a numpy array if necessary.
:param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).
:return: numpy array.
"""
if isinstance(image, sitk.Image):
image = sitk.GetArrayFromImage(image) # Transpose is taken care by SimpleITK
return image
def modify_size_channel(image, output_size, channels, interpolator=sitk.sitkNearestNeighbor):
"""Modifies image size and channel.
:param image: numpy array, channels_last, hwc or dhwc.
:param output_size: output size, (height, width) or (depth, height, width).
:param channels: output channels (1 or 3).
:param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.
:return: modified image.
"""
if image.ndim not in [3, 4]:
raise Exception('Input image must be 2D or 3D with channels.')
# Resize all channels
input_channels = image.shape[-1]
output_image = []
for i in range(input_channels):
output_image.append(resize(image=image[..., i], output_size=output_size, interpolator=interpolator))
image = np.array(output_image) # channels_first
# Modify channels if needed
if channels == 1 and input_channels == 3:
image = image.mean(axis=0, keepdims=True)
elif channels == 3 and input_channels == 1:
image = image.repeat(channels, axis=0)
# Change to channels_last
    axes = list(range(image.ndim))  # list() so the slices below can be concatenated on Python 3
axes = axes[1:] + axes[:1]
image = image.transpose(axes)
return image
def modify_size_channel_batch(image, output_size, channels, interpolator=sitk.sitkNearestNeighbor):
"""Modifies image size and channels of an image batch.
:param image: numpy array, channels_last, bhwc or bdhwc.
:param output_size: output size, (height, width) or (depth, height, width).
:param channels: output channels (1 or 3).
:param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.
:return: modified image batch.
"""
assert image.ndim in [4, 5]
output_image = []
for img in image:
output_image.append(
modify_size_channel(image=img, output_size=output_size, channels=channels, interpolator=interpolator))
image = np.array(output_image)
return image
def bound_by_labels(labels, scale=1.0, pad_square=True):
"""
Gets a bounding box from a label image.
:param labels: grey-level label image.
:param scale: isotropic scaling of the bounding box.
:param pad_square: True if padding the shorter side of the bounding box.
:return: the bounding box with shape (2, 2). The first index is for the dimension (h, w), and the second index is
for the lower and upper bounds (lb, ub). Cropping with the bounding box can be performed as: cropped = image[
bound[0][0]:bound[0][1], bound[1][0]:bound[1][1]]
"""
# Get bounding box
    _, contours, _ = cv2.findContours(labels.astype(np.uint8), mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 3.x signature; OpenCV 4 returns (contours, hierarchy)
points = np.concatenate(contours)
bound = cv2.boundingRect(points) # [x, y, w, h]
# Convert format
sz = np.array([bound[3], bound[2]])
bound = np.array([[bound[1], bound[1]+bound[3]],
[bound[0], bound[0]+bound[2]]])
# Rescale
if scale != 1.0:
for i in range(2):
            diff = int((scale - 1) * 0.5 * sz[i])  # int, so in-place ops on the integer bound array stay valid
bound[i][0] -= diff # lower bound
bound[i][1] += diff # upper bound
bound = bound.astype(np.int)
# Pad square
sz = bound[:, 1] - bound[:, 0]
if pad_square and sz[0] != sz[1]:
diff = sz.max() - sz.min()
idx = sz.argmin()
        bound[idx][0] -= diff // 2
        bound[idx][1] += diff // 2 + diff % 2
# Correct index
for i in range(2):
bound[i][0] = bound[i][0] if bound[i][0] >= 0 else 0
bound[i][1] = bound[i][1] if bound[i][1] <= labels.shape[i] else labels.shape[i]
return bound
def windowing(image, window_center, window_width):
"""Performs windowing on an image.
:param numpy.array image: grey-level image.
:param int/list/array window_center: for a scalar, it is used as the window center. For a list or array,
its length must be a multiple of two to represent pairs of possible ranges. A random number generated between a
pair is used as the window center. The pair used is the same as that of window_width if it is also a list or array.
:param int/list/array window_width: for a scalar, it is used as the window width. For a list or array,
its length must be a multiple of two to represent pairs of possible ranges. A random number generated between a
pair is used as the window width. The pair used is the same as that of window_center if it is also a list or array.
:return windowed image
window_center and window_width must have the same length if both are not scalar.
"""
idx = None # Index to both window_center and window_width
    if np.isscalar(window_center):
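        # Function body truncated in this dump; a minimal sketch of the usual
        # scalar case, assuming windowing clips grey levels to
        # [center - width/2, center + width/2]:
        #   lo = window_center - window_width / 2.0
        #   hi = window_center + window_width / 2.0
        #   image = np.clip(image, lo, hi)
        pass  # placeholder so the truncated function still parses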
import numpy as np
from matplotlib import pyplot as plt
from torch_geometric.data import Data, DataLoader
import pandas as pd
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import TensorDataset
params = {
'font.size': 12,
'xtick.labelsize': 'small',
'ytick.labelsize': 'small',
'axes.linewidth': 1.3}
plt.rcParams.update(params)
# Load Data
labels = pd.read_csv("./data/labels.csv")
print(labels.iloc[:, 1])
maximum = np.max(labels.iloc[:, 1])
minimum = np.min(labels.iloc[:, 1])
drag = labels.iloc[:, 1]
testnodes = []
trainnodes = []
r2nodes = []
nodes = 700
X = np.zeros((0, 2 * nodes))
c = 0
lim = 1500
y = np.zeros(0)
for label in labels.iloc[0:lim, 0]:
if label == 'fx79w660a':
c = c + 1
continue
adjvel = np.loadtxt('./data/' + str(nodes) + 'manadjvel/' +
label + '_adjvel', dtype='double').reshape(1, -1)
meanorig = np.mean(labels.iloc[0:lim, 1])
stdorig = np.std(labels.iloc[0:lim, 1])
if (labels.iloc[c, 1] > meanorig + 2 * stdorig):
print("drag too high, filtered")
c = c + 1
continue
if (labels.iloc[c, 1] < meanorig - 3 * stdorig):
        print(meanorig - 3 * stdorig)
print(labels.iloc[c, 1], c)
print("drag too low, filtered")
c = c + 1
continue
else:
X = np.vstack((X, adjvel))
y = np.append(y, labels.iloc[c, 1])
c = c + 1
X_avg = np.zeros((X.shape[0], X.shape[1] - 5))
for i in range(2 * nodes - 5):
X_avg[:, i] = np.mean(X[:, i:i + 5], axis=1)
mean = np.mean(y)
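# Script truncated here; the usual continuation would standardize the targets
# before training, e.g. (sketch, assuming z-score normalization):
#   std = np.std(y)
#   y = (y - mean) / std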
#!/bin/env python3
import subprocess
import numpy as np
import matplotlib.pyplot as plt
import yaml
import pickle
import os
def layer_perf():
m = 2
#nthreads = np.arange(1, 9)
nthreads = np.array([4])
means = np.zeros(nthreads.shape, dtype=np.float)
std = np.zeros(nthreads.shape, dtype=np.float)
times = np.zeros((m,), dtype=np.float)
for i, nthread in enumerate(nthreads):
print(nthread, end=": ")
for j in range(m):
p = subprocess.Popen("./test_layer_perf {:d}".format(nthread), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
lines = [x.decode('ascii').rstrip() for x in p.stdout.readlines()]
p.wait()
times[j] = float(lines[0])
print(times[j], end=" ")
print("")
means[i] = np.mean(times)
std[i] = np.std(times)
return nthreads, means, std
def main_scaling():
m = 2
modes = ["sync", "async", "rma"]
with open("../config.yaml", "r") as file:
        config = yaml.safe_load(file)  # safe_load avoids arbitrary object construction
configs = dict()
for mode in modes:
configs[mode] = config.copy()
configs['sync']['nb_particles_per_cycle'] = 100000
configs['async']['nb_particles_per_cycle'] = 1000
configs['rma']['nb_particles_per_cycle'] = 65000
print("=== Configs ===")
for mode in modes:
print("{}: {}".format(mode, configs[mode]))
print("===============")
N = np.array([1, 2, 4, 5, 8, 10])
#N = np.array([1, 5])
res = dict()
res["N"] = N
res['modes'] = modes
for mode in modes:
print(mode)
res[mode] = dict()
res[mode]["means"] = np.zeros(N.shape, dtype=np.float)
res[mode]["std"] = | np.zeros(N.shape, dtype=np.float) | numpy.zeros |
#------------------------------------------------------------------------------
# Plotting.py
#
# Create publication-ready 3D and 2D plots using matplotlib
#
#
# Created: 4/4/18 - <NAME> -- <EMAIL>
#
# Modified:
# * 4/4/18 - DMN -- <EMAIL>
# - Added documentation for this script
#
#------------------------------------------------------------------------------
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import ticker as mtick
from matplotlib import rc
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os
from scipy.interpolate import griddata
from cycler import cycler
from pandas.plotting import register_matplotlib_converters
import datetime
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
from matplotlib.ticker import Formatter
import matplotlib.dates as mdates
import matplotlib.colors as colors
### MATPLOTLIBRC FORMAT
#mpl.rcParams['backend'] = 'MacOSX'
# LINES
mpl.rcParams['lines.linewidth'] = 2.0 # line width in points
mpl.rcParams['lines.dash_capstyle'] = 'round' # butt|round|projecting
# FONT
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.weight'] = 'normal'
#font.size : 12.0
# mpl.rcParams['font.serif'] = 'DejaVu Serif', 'CMU Serif', 'Bitstream Vera Serif', 'New Century Schoolbook', 'Century Schoolbook L', 'Utopia', 'ITC Bookman', 'Bookman', 'Nimbus Roman No9 L', 'Times New Roman', 'Times', 'Palatino', 'Charter', 'serif'
mpl.rcParams['font.serif'] = 'DejaVu Serif'
# TEXT
mpl.rcParams['text.hinting_factor'] = 8 # Specifies the amount of softness for hinting in the
# horizontal direction. A value of 1 will hint to full
# pixels. A value of 2 will hint to half pixels etc.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preview'] = True
mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath} \boldmath"]
# AXES
mpl.rcParams['axes.labelsize'] = 22 # fontsize of the x any y labels
mpl.rcParams['axes.labelweight'] = 'medium' # weight of the x and y labels
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628'])
## color cycle for plot lines as list of string
## colorspecs: single letter, long name, or web-style hex
## Note the use of string escapes here ('1f77b4', instead of 1f77b4)
# TICKS
mpl.rcParams['xtick.labelsize'] = 18 # fontsize of the tick labels
mpl.rcParams['ytick.labelsize'] = 18 # fontsize of the tick labels
# GRID
mpl.rcParams['grid.color'] = '0.75' # grid color
mpl.rcParams['grid.linestyle'] = ':' # dotted
# LEGEND
mpl.rcParams['legend.fancybox'] = True # if True, use a rounded box for the
# legend, else a rectangle
mpl.rcParams['legend.fontsize'] = 16
mpl.rcParams['legend.borderaxespad'] = 0.1 # the border between the axes and legend edge in fraction of fontsize
# FIGURE
mpl.rcParams['figure.figsize'] = 6,4 # figure size in inches
mpl.rcParams['figure.subplot.left'] = 0.2 # the left side of the subplots of the figure
mpl.rcParams['figure.subplot.right'] = 0.9 # the right side of the subplots of the figure
mpl.rcParams['figure.subplot.bottom'] = 0.2 # the bottom of the subplots of the figure
mpl.rcParams['figure.subplot.top'] = 0.85 # the top of the subplots of the figure
mpl.rcParams['figure.subplot.wspace'] = 0.2 # the amount of width reserved for blank space between subplots
mpl.rcParams['figure.subplot.hspace'] = 0.2 # the amount of height reserved for white space between subplots
# SAVEFIG
mpl.rcParams['savefig.dpi'] = 600 # figure dots per inch
mpl.rcParams['savefig.format'] = 'svg' # png, ps, pdf, svg
# To generically create multiple plots
plot_linestyle = ['-','--','-.',':']
marker_weight = [30,60,40,40]
plot_markerstyle = ['o','x','v','^']
def set_lims(ax,X,Y,xmin,xmax,ymin,ymax):
if xmax == 0.:
xmax += 0.3
# Determine the lower and upper bounds of the horizontal axis
if xmax == None:
xmax = np.amax(X)
if xmin == None:
xmin = np.amin(X)
# Set the limits of the plot
plt.xlim(xmin, xmax)
if not isinstance(ymax,np.ndarray):
# Set the window limits
plt.ylim(np.amin(Y) - ymin * abs(np.amin(Y)),
np.amax(Y) + ymax * abs(np.amax(Y)-np.amin(Y)))
else:
plt.ylim(ymin[0],ymax[0])
# Container for all plots
def generate_plot(
X,Y,labels,xlabel,ylabel,
plot_type = 'Plot',
ymax = 0.1,
ymin = 0.1,
xmax = None,
xmin = None,
tick_increment = None,
showplot = False,
save_plot = False,
log_y = False,
log_x = False,
transparent = False,
grid = False,
folder = None,
filename = 'Plot',
num_col = 2,
legend_loc = 'upper right',
experimental_args = None,
xlabelpad = 5,
hide_origin = False,
for_notebook=False,
template='publication',
file_type='pdf'
):
'''
This is a function which accepts a series of data and plots it based on preset defaults
as well as user-defined, custom inputs.
Creator : <NAME> - <EMAIL>
Mandatory Inputs:
X - x-coordinate of the plot
Y - y-coordinates of the plot. Must have an axis of the same length as X
labels - list of strings which form the labels we will use for the legend
xlabel - Label along the X-axis
ylabel - Label along the Y-axis
Optional Inputs:
plot_type - String indicating the type of plot
ymax - multiplicative value for the maximum Y value
ymin - multiplicative value for the minimum Y value
xmax - maximum X value
xmin - minimum X value
tick_increment - spacing between y-axis ticks
showplot - boolean indicating whether the plot is displayed
log_y - boolean indicating whether the y-axis should be on a log scale
transparent - boolean indicating whether to save a transparent .png
grid - boolean indicating whether to show the grid
folder - subfolder in which to save the figure
filename - string indicating the name of the saved file
num_col - number of columns in the legend
legend_loc - string indicating the location of the legend
experimental_args - experimental values to show on the plot
xlabelpad - spacing between the x-axis and the x-label
'''
if template.lower() == 'large':
plt.figure(figsize=(10,6.67))
elif template.lower() == 'wide':
plt.figure(figsize=(12,4))
elif template.lower() == 'presentation':
plt.figure(figsize=(9,6))
elif template.lower() == 'presentation-wide':
plt.figure(figsize=(12,6))
else:
plt.figure()
# Customize the axes
ax = plt.gca()
# Make sure the Y data is at least 2-D
Y = np.atleast_2d(Y)
# Ensure the compatibility of the X and Y data
if Y.shape[0] != X.shape[0] and Y.shape[1] != X.shape[0]:
raise ValueError(
'''The Shape of X, [{}], is not compatible
with the shape of Y, [{}]...\n Exiting'''
.format(X.shape,Y.shape))
elif Y.shape[0] != X.shape[0]:
Y = Y.T
if Y.shape[1] != len(labels):
raise ValueError('Please ensure the number of legend labels matches the number of data plots.')
if plot_type.lower() == 'plot':
# Plot all of the available data
for i in np.arange(0,len(labels)):
if labels[i].lower() == 'vtol':
plt.plot(X, Y[:,i],
label=r'$V_{tol}$',
color='k',
linestyle=plot_linestyle[1], # Linestyle given from array at the beginning of this document
linewidth=1)
elif 'sigma' in labels[i].lower():
plt.plot(X, Y[:,i],
label=r'\textbf{' + labels[i] + '}',
color='k',
linestyle=plot_linestyle[1], # Linestyle given from array at the beginning of this document
linewidth=2)
else:
if log_y:
plt.semilogy(X, Y[:,i],
label=r'\textbf{' + labels[i] + '}',
linestyle=plot_linestyle[i], # Linestyle given from array at the beginning of this document
linewidth=2)
else:
plt.plot(X, Y[:,i],
label=r'\textbf{' + labels[i] + '}',
linestyle=plot_linestyle[i], # Linestyle given from array at the beginning of this document
linewidth=2)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
if tick_increment is not None:
loc = mtick.MultipleLocator(base=tick_increment) # this locator puts ticks at regular intervals
ax.yaxis.set_major_locator(loc)
set_lims(ax,X,Y,xmin,xmax,ymin,ymax)
# Show the grid, if desired
ax.grid(grid)
ax.set_axisbelow(True)
# If we want to plot experimental data
if experimental_args is not None:
data,positions = experimental_args
if len(np.atleast_2d(data)[:,0]) > 1:
            # This code is for closely grouped experimental data that doesn't need a box-and-whisker plot
means = np.average(data,axis=0)
            maxes = np.amax(data,axis=0)
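            # Truncated in this dump; presumably followed by drawing the
            # spread of the experimental data at `positions`, e.g. (sketch):
            #   mins = np.amin(data, axis=0)
            #   plt.errorbar(positions, means,
            #                yerr=[means - mins, maxes - means], fmt='ko')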
import numpy as np
import joblib
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier
from matplotlib.lines import Line2D
from xgboost import XGBClassifier
import xgboost as xgb
from src.data.features_util import list_test_files
from src.data.workout import Activity
from src.config import (
TRAIN_BOOT_DIR, TRAIN_POLE_DIR, TRAIN_FEATURES_FILENAME, TRAIN_LABELS_FILENAME
)
from numpy import ndarray
from pathlib import Path
def create_model() -> any:
return GradientBoostingClassifier(verbose=True)
def create_xgboost() -> any:
return XGBClassifier(verbosity=2, use_label_encoder=False)
def evaluate_model_accuracy(features: ndarray, labels: ndarray, model: any = create_model()):
"""
Evaluate model accuracy using k-fold cross-validation
"""
print('Evaluating model accuracy...')
cv = RepeatedStratifiedKFold(n_splits=4, n_repeats=1, random_state=1)
n_scores = cross_val_score(model, features, labels, scoring='accuracy', cv=cv, n_jobs=-1, verbose=1)
print('Mean Accuracy: %.3f (%.3f)' % (np.mean(n_scores), np.std(n_scores)))
def train_model(activity: Activity, model: any = create_model()):  # instantiate, matching evaluate_model_accuracy
data_dir: Path = TRAIN_BOOT_DIR if activity == Activity.Boot else TRAIN_POLE_DIR
features: ndarray = np.load(data_dir / TRAIN_FEATURES_FILENAME)
    labels: ndarray = np.load(data_dir / TRAIN_LABELS_FILENAME)
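    # train_model is truncated here; a minimal sketch of the usual
    # continuation, assuming the model is fitted and persisted with joblib
    # (imported above); the output file name is hypothetical:
    #   model.fit(features, labels)
    #   joblib.dump(model, data_dir / "model.joblib")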
import unittest
import numpy as np
import syft
from syft import TensorBase
class ConvenienceTests(unittest.TestCase):
def test_zeros(self):
self.assertTrue((syft.zeros(5).data == np.zeros(5)).all())
def test_ones(self):
self.assertTrue((syft.ones(5).data == np.ones(5)).all())
def test_rand(self):
np.random.seed(0)
x = syft.rand(5).data
np.random.seed(0)
y = np.random.rand(5)
self.assertTrue((x == y).all())
class DotTests(unittest.TestCase):
def test_dot_int(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([4, 5, 6]))
self.assertEqual(syft.dot(t1, t2), 32)
def test_dot_float(self):
t1 = TensorBase(np.array([1.3, 2.5, 3.7]))
t2 = TensorBase(np.array([4.9, 5.8, 6.5]))
self.assertEqual(syft.dot(t1, t2), 44.92)
class DiagTests(unittest.TestCase):
def test_one_dim_tensor_main_diag(self):
t = TensorBase(np.array([1, 2, 3, 4]))
self.assertTrue(syft.equal(syft.diag(t), TensorBase([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])))
def test_one_dim_tensor_upper_diag(self):
t = TensorBase(np.array([1, 2, 3, 4]))
self.assertTrue(syft.equal(syft.diag(t, 1), TensorBase([[0, 1, 0, 0, 0],
[0, 0, 2, 0, 0],
[0, 0, 0, 3, 0],
[0, 0, 0, 0, 4],
[0, 0, 0, 0, 0]])))
def test_one_dim_tensor_below_diag(self):
t = TensorBase(np.array([1, 2, 3, 4]))
self.assertTrue(syft.equal(syft.diag(t, -1), TensorBase([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 4, 0]])))
def test_two_dim_tensor_main_diag(self):
t = TensorBase(np.array([[0, 1], [2, 3]]))
self.assertTrue(syft.equal(syft.diag(t, 0), TensorBase([0, 3])))
def test_two_dim_tensor_upper_diag(self):
t = TensorBase(np.array([[0, 1], [2, 3]]))
self.assertTrue(syft.equal(syft.diag(t, 1), TensorBase([1])))
def test_two_dim_tensor_below_diag(self):
t = TensorBase(np.array([[0, 1], [2, 3]]))
self.assertTrue(syft.equal(syft.diag(t, -1), TensorBase([2])))
class CeilTests(unittest.TestCase):
def test_ceil(self):
t1 = TensorBase(np.array([[2.3, 4.1], [7.4, 8.3]]))
self.assertTrue(syft.equal(syft.ceil(t1), TensorBase([[3., 5.],
[8., 9.]])))
class FloorTests(unittest.TestCase):
def test_floor(self):
t1 = TensorBase(np.array([[2.3, 4.1], [7.4, 8.3]]))
self.assertTrue(syft.equal(syft.math.floor(t1), TensorBase([[2., 4.],
[7., 8.]])))
class SinTests(unittest.TestCase):
def test_sin(self):
# int
t1 = TensorBase(np.array([[3, 1, 2], [0, -1, 2]]))
t2 = syft.math.sin(t1)
self.assertTrue(syft.equal(t1.data, np.array([[3, 1, 2], [0, -1, 2]])))
self.assertTrue(syft.equal(t2.data, np.sin(np.array([[3, 1, 2], [0, -1, 2]]))))
# float
t1 = TensorBase(np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]]))
t2 = syft.math.sin(t1)
self.assertTrue(syft.equal(t1.data, np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]])))
self.assertTrue(syft.equal(t2.data, np.sin(np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]]))))
class SinhTests(unittest.TestCase):
def test_sinh(self):
# int
t1 = TensorBase(np.array([[3, 1, 2], [0, -1, 2]]))
t2 = syft.math.sinh(t1)
self.assertTrue(syft.equal(t1.data, np.array([[3, 1, 2], [0, -1, 2]])))
self.assertTrue(syft.equal(t2.data, np.sinh(np.array([[3, 1, 2], [0, -1, 2]]))))
# float
t1 = TensorBase(np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]]))
t2 = syft.math.sinh(t1)
self.assertTrue(syft.equal(t1.data, np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]])))
self.assertTrue(syft.equal(t2.data, np.sinh(np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]]))))
class CosTests(unittest.TestCase):
def test_cos(self):
# int
t1 = TensorBase(np.array([[3, 1, 2], [0, -1, 2]]))
t2 = syft.math.cos(t1)
self.assertTrue(syft.equal(t1.data, np.array([[3, 1, 2], [0, -1, 2]])))
self.assertTrue(syft.equal(t2.data, np.cos(np.array([[3, 1, 2], [0, -1, 2]]))))
# float
t1 = TensorBase(np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]]))
t2 = syft.math.cos(t1)
        self.assertTrue(syft.equal(t1.data, np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]])))
        self.assertTrue(syft.equal(t2.data, np.cos(np.array([[3.3, 1.3, 2.2], [0.0, -1.3, 2.4]]))))
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
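# Consistency check (added sketch, not part of the original experiments): the
# reward literals above appear to equal -makespan / (makespan + 3741) for the
# corresponding makespan entry, where the constant 3741 is an assumption
# inferred from the data (presumably an environment-specific normalization
# offset). The assertion below verifies the hypothesis on one LSTM-relu pair.
def _assumed_reward(makespan, offset=3741):
    # Hypothesized makespan-to-reward mapping; the offset is an assumption.
    return -makespan / (makespan + offset)

assert all(abs(_assumed_reward(m) - r) < 1e-9
           for m, r in zip(drnnLSTMreluMakespan0, drnnLSTMreluRewards0)), \
    "reward literals do not match the assumed -m/(m + 3741) mapping"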
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
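    # NOTE: the numbered per-run lists defined above (e.g. drnnGRUtanhMakespan0
    # ... drnnGRUtanhMakespan49) follow a strict <prefix><index> naming
    # convention, so they can be gathered programmatically instead of being
    # appended one by one. `collect_runs` is a small helper sketch that relies
    # on that convention via the module-level globals(); the helper's name and
    # its default of 50 runs are assumptions based on the data in this file.
    def collect_runs(prefix, n=50):
        """Return the module-level lists named prefix0 .. prefix{n-1}."""
        return [globals()[f"{prefix}{i}"] for i in range(n)]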
    for run in collect_runs("drnnGRUtanhMakespan"):
        drnnGRUtanhMakespan.append(np.mean(run))
    for run in collect_runs("drnnGRUtanhRewards"):
        drnnGRUtanhRewards.append(np.mean(run))
    drnnGRUtanhMakespanList.extend(collect_runs("drnnGRUtanhMakespan"))
    drnnGRUtanhRewardsList.extend(collect_runs("drnnGRUtanhRewards"))
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
    for run in collect_runs("drnnGRUreluMakespan"):
        drnnGRUreluMakespan.append(np.mean(run))
    for run in collect_runs("drnnGRUreluRewards"):
        drnnGRUreluRewards.append(np.mean(run))
drnnGRUreluMakespanList.append(drnnGRUreluMakespan0)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan1)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan2)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan3)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan4)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan5)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan6)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan7)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan8)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan9)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan10)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan11)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan12)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan13)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan14)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan15)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan16)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan17)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan18)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan19)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan20)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan21)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan22)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan23)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan24)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan25)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan26)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan27)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan28)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan29)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan30)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan31)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan32)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan33)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan34)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan35)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan36)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan37)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan38)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan39)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan40)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan41)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan42)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan43)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan44)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan45)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan46)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan47)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan48)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan49)
drnnGRUreluRewardsList.append(drnnGRUreluRewards0)
drnnGRUreluRewardsList.append(drnnGRUreluRewards1)
drnnGRUreluRewardsList.append(drnnGRUreluRewards2)
drnnGRUreluRewardsList.append(drnnGRUreluRewards3)
drnnGRUreluRewardsList.append(drnnGRUreluRewards4)
drnnGRUreluRewardsList.append(drnnGRUreluRewards5)
drnnGRUreluRewardsList.append(drnnGRUreluRewards6)
drnnGRUreluRewardsList.append(drnnGRUreluRewards7)
drnnGRUreluRewardsList.append(drnnGRUreluRewards8)
drnnGRUreluRewardsList.append(drnnGRUreluRewards9)
drnnGRUreluRewardsList.append(drnnGRUreluRewards10)
drnnGRUreluRewardsList.append(drnnGRUreluRewards11)
drnnGRUreluRewardsList.append(drnnGRUreluRewards12)
drnnGRUreluRewardsList.append(drnnGRUreluRewards13)
drnnGRUreluRewardsList.append(drnnGRUreluRewards14)
drnnGRUreluRewardsList.append(drnnGRUreluRewards15)
drnnGRUreluRewardsList.append(drnnGRUreluRewards16)
drnnGRUreluRewardsList.append(drnnGRUreluRewards17)
drnnGRUreluRewardsList.append(drnnGRUreluRewards18)
drnnGRUreluRewardsList.append(drnnGRUreluRewards19)
drnnGRUreluRewardsList.append(drnnGRUreluRewards20)
drnnGRUreluRewardsList.append(drnnGRUreluRewards21)
drnnGRUreluRewardsList.append(drnnGRUreluRewards22)
drnnGRUreluRewardsList.append(drnnGRUreluRewards23)
drnnGRUreluRewardsList.append(drnnGRUreluRewards24)
drnnGRUreluRewardsList.append(drnnGRUreluRewards25)
drnnGRUreluRewardsList.append(drnnGRUreluRewards26)
drnnGRUreluRewardsList.append(drnnGRUreluRewards27)
drnnGRUreluRewardsList.append(drnnGRUreluRewards28)
drnnGRUreluRewardsList.append(drnnGRUreluRewards29)
drnnGRUreluRewardsList.append(drnnGRUreluRewards30)
drnnGRUreluRewardsList.append(drnnGRUreluRewards31)
drnnGRUreluRewardsList.append(drnnGRUreluRewards32)
drnnGRUreluRewardsList.append(drnnGRUreluRewards33)
drnnGRUreluRewardsList.append(drnnGRUreluRewards34)
drnnGRUreluRewardsList.append(drnnGRUreluRewards35)
drnnGRUreluRewardsList.append(drnnGRUreluRewards36)
drnnGRUreluRewardsList.append(drnnGRUreluRewards37)
drnnGRUreluRewardsList.append(drnnGRUreluRewards38)
drnnGRUreluRewardsList.append(drnnGRUreluRewards39)
drnnGRUreluRewardsList.append(drnnGRUreluRewards40)
drnnGRUreluRewardsList.append(drnnGRUreluRewards41)
drnnGRUreluRewardsList.append(drnnGRUreluRewards42)
drnnGRUreluRewardsList.append(drnnGRUreluRewards43)
drnnGRUreluRewardsList.append(drnnGRUreluRewards44)
drnnGRUreluRewardsList.append(drnnGRUreluRewards45)
drnnGRUreluRewardsList.append(drnnGRUreluRewards46)
drnnGRUreluRewardsList.append(drnnGRUreluRewards47)
drnnGRUreluRewardsList.append(drnnGRUreluRewards48)
drnnGRUreluRewardsList.append(drnnGRUreluRewards49)
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
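# Smooth the concatenated episode traces with a 12-episode rolling mean before
# plotting; pd.Series(...).rolling(12).mean() leaves the first 11 points as NaN.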
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues).rolling(12).mean()
plt.plot(smoothGRUtanhMakespanValues)
plt.xlabel("Episodes")
plt.ylabel("Seconds")
plt.title("'Makespan' with a deep neural network including one GRU layer")
plt.show()
smoothGRUtanhRewardsValues = pd.Series(drnnGRUtanhRewardsValues).rolling(12).mean()
plt.plot(smoothGRUtanhRewardsValues)
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("'Reward' with a deep neural network including one GRU layer")
plt.show()
#####################
smoothGRUreluMakespanValues = pd.Series(drnnGRUreluMakespanValues).rolling(12).mean()
plt.plot(smoothGRUreluMakespanValues)
plt.xlabel("Episodes")
plt.ylabel("Seconds")
plt.title("'Makespan' with a deep neural network including one GRU layer and ReLU")
plt.show()
smoothGRUreluRewardsValues = pd.Series(drnnGRUreluRewardsValues).rolling(12).mean()
plt.plot(smoothGRUreluRewardsValues)
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("'Reward' with a deep neural network including one GRU layer and ReLU")
plt.show()
###################
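# Overlay the smoothed tanh and relu curves (same GRU architecture) for comparison.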
plt.plot(smoothGRUtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothGRUreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodes")
plt.ylabel("Seconds")
plt.title("'Makespan' with a deep neural network including one GRU layer")
plt.legend()
plt.show()
###################
plt.plot(smoothGRUtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothGRUreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("'Reward' with a deep neural network including one GRU layer")
plt.legend()
plt.show()
###################
drnnLSTMtanhMakespan = []
drnnLSTMtanhRewards = []
drnnLSTMtanhMakespanList = []
drnnLSTMtanhRewardsList = []
drnnLSTMtanhMakespanValues = []
drnnLSTMtanhRewardsValues = []
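# Same aggregation for the LSTM+tanh runs (drnnLSTMtanhMakespan0..49 and
# drnnLSTMtanhRewards0..49 assumed defined above as module-level globals).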
for i in range(50):
    run_makespan = globals()[f"drnnLSTMtanhMakespan{i}"]
    run_rewards = globals()[f"drnnLSTMtanhRewards{i}"]
    drnnLSTMtanhMakespan.append(np.mean(run_makespan))
    drnnLSTMtanhRewards.append(np.mean(run_rewards))
    drnnLSTMtanhMakespanList.append(run_makespan)
    drnnLSTMtanhRewardsList.append(run_rewards)
for vector in drnnLSTMtanhMakespanList:
for element in vector:
drnnLSTMtanhMakespanValues.append(element)
for vector in drnnLSTMtanhRewardsList:
for element in vector:
drnnLSTMtanhRewardsValues.append(element)
smoothLSTMtanhMakespanValues = pd.Series(drnnLSTMtanhMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMtanhMakespanValues)
plt.xlabel("Episodes")
plt.ylabel("Seconds")
plt.title("'Makespan' using LSTM with tanh")
plt.show()
smoothLSTMtanhRewardsValues = pd.Series(drnnLSTMtanhRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMtanhRewardsValues)
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("'Reward' using LSTM with tanh")
plt.show()
####################
drnnLSTMreluMakespan = []
drnnLSTMreluRewards = []
drnnLSTMreluMakespanList = []
drnnLSTMreluRewardsList = []
drnnLSTMreluMakespanValues = []
drnnLSTMreluRewardsValues = []
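# Same aggregation for the LSTM+ReLU runs (drnnLSTMreluMakespan0..49 and
# drnnLSTMreluRewards0..49 assumed defined above as module-level globals).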
for i in range(50):
    run_makespan = globals()[f"drnnLSTMreluMakespan{i}"]
    run_rewards = globals()[f"drnnLSTMreluRewards{i}"]
    drnnLSTMreluMakespan.append(np.mean(run_makespan))
    drnnLSTMreluRewards.append(np.mean(run_rewards))
    drnnLSTMreluMakespanList.append(run_makespan)
    drnnLSTMreluRewardsList.append(run_rewards)
for vector in drnnLSTMreluMakespanList:
for element in vector:
drnnLSTMreluMakespanValues.append(element)
for vector in drnnLSTMreluRewardsList:
for element in vector:
drnnLSTMreluRewardsValues.append(element)
smoothLSTMreluMakespanValues = pd.Series(drnnLSTMreluMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMreluMakespanValues)
plt.xlabel("Episodes")
plt.ylabel("Seconds")
plt.title("'Makespan' using LSTM with relu")
plt.show()
smoothLSTMreluRewardsValues = pd.Series(drnnLSTMreluRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMreluRewardsValues)
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("'Reward' using LSTM with relu")
plt.show()
##################
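# Overlay the smoothed tanh and relu curves (same LSTM architecture) for comparison.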
plt.plot(smoothLSTMtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodes")
plt.ylabel("Seconds")
plt.title("'Makespan' with a deep neural network including one LSTM layer")
plt.legend()
plt.show()
##################
plt.plot(smoothLSTMtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodes")
plt.ylabel("Reward")
plt.title("'Reward' with a deep neural network including one LSTM layer")
plt.legend()
plt.show()
##################
##################
##################
drlTanhMakespan = []
drlTanhRewards = []
drlTanhMakespanList = []
drlTanhRewardsList = []
drlTanhMakespanValues = []
drlTanhRewardsValues = []
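# Aggregate the feed-forward DRL (tanh) runs; assumes drlTanhMakespan0,
# drlTanhMakespan1, ... were defined above as module-level globals.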
for i in range(15):
    drlTanhMakespan.append(np.mean(globals()[f"drlTanhMakespan{i}"]))
drlTanhMakespan.append( | np.mean(drlTanhMakespan15) | numpy.mean |
"""Functions to clean images by fitting linear trends to the initial scans."""
try:
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
HAS_MPL = True
except ImportError:
HAS_MPL = False
from .fit import contiguous_regions
from .utils import jit, vectorize
from .histograms import histogram2d
import numpy as np
__all__ = ["fit_full_image", "display_intermediate"]
@vectorize('(float64(float64,float64,float64,float64))', nopython=True)
def _align_fast(x, scan, m, q):
"""Align ``scan`` to a linear function."""
return scan - x * m - q
XBUFFER = None
YBUFFER = None
def _get_coords(xedges, yedges):
"""Get coordinates given the edges of the histogram."""
global XBUFFER, YBUFFER
if XBUFFER is None:
xcenters = (xedges[:-1] + xedges[1:]) / 2
ycenters = (yedges[:-1] + yedges[1:]) / 2
X, Y = np.meshgrid(xcenters, ycenters)
XBUFFER = X
YBUFFER = Y
return XBUFFER, YBUFFER
EXPOMAP = None
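# Module-level cache for the exposure map: the pointing pattern does not change
# between iterations of the fit, so the exposure histogram is computed once and
# reused on subsequent calls.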
def _calculate_image(x, y, counts, bx, by, nsamp):
"""Calculate the image."""
global EXPOMAP
if EXPOMAP is None:
EXPOMAP, xedges, yedges = histogram2d(x, y, bins=(bx, by),
weights=nsamp)
histograms, xedges, yedges = \
histogram2d(x, y, bins=(bx, by),
weights=[counts * nsamp, (counts) ** 2 * nsamp])
img, img_var = histograms
X, Y = _get_coords(xedges, yedges)
good = EXPOMAP > 0
mean = img.copy()
mean[good] /= EXPOMAP[good]
img_var[good] = img_var[good] / EXPOMAP[good] - mean[good] ** 2
return X, Y, mean.T, img_var.T
@jit # (nopython=True)
def _align_all(newd_t, newd_c, data_idx, par):
ms = np.zeros_like(newd_c, dtype=np.float64)
qs = np.zeros_like(newd_c, dtype=np.float64)
for i_p in range(0, len(par), 2):
i0, i1 = data_idx[i_p // 2]
if i0 == i1:
continue
sliceobj = slice(i0, i1)
ms[sliceobj] = par[i_p]
qs[sliceobj] = par[i_p + 1]
return _align_fast(newd_t, newd_c, ms, qs)
def counter(initial_value=0):
count = initial_value
while True:
yield count
count += 1
ITERATION_COUNT = counter(0)
CURR_CHANNEL = "Feed0_RCP"
def _save_intermediate(filename, par):
np.savetxt(filename, par)
def _get_saved_pars(filename):
return np.genfromtxt(filename)
def _save_iteration(par):
iteration = next(ITERATION_COUNT)
print(iteration, end="\r")
if iteration % 2 == 0:
_save_intermediate("out_iter_{}_{:03d}.txt".format(CURR_CHANNEL,
iteration), par)
def _obj_fun(par, data, data_idx, excluded, bx, by):
"""
This is the function we have to minimize.
Parameters
----------
par : array([m0, q0, m1, q1, ...])
linear baseline parameters for the image.
data : [times, idxs, x, y, counts]
All five quantities are ``numpy`` ``array``s; ``time`` is time
from the start of the scan; ``x``, ``y`` are the image coordinates,
``idx`` corresponds to the scan number and ``counts`` to the scan
values at those coordinates.
excluded : [[centerx0, centery0, radius0]]
list of circular regions to exclude from fitting (e.g. strong sources
that might alter the total rms)
"""
newd_t, _, newd_x, newd_y, newd_c, newd_e = data
newd_c_new = _align_all(newd_t, newd_c, data_idx, par)
X, Y, img, img_var = _calculate_image(newd_x, newd_y, newd_c_new, bx, by,
newd_e)
good = img != 0.
if excluded is not None:
for e in excluded:
centerx, centery, radius = e
filt = (X - centerx) ** 2 + (Y - centery) ** 2 < radius ** 2
good[filt] = 0
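    # Figure of merit: total within-pixel variance plus the pixel-to-pixel
    # variance scaled by the number of valid pixels (both terms are sums).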
stat = np.sum(img_var[good]) + np.var(img[good]) * img[good].size
return stat
def _resample_scans(data):
"""Resample all scans to match the pixels of the image."""
t, idx, x, y, c = data
xmax, xmin = np.max(x), np.min(x)
ymax, ymin = np.max(y), np.min(y)
x_range = xmax - xmin
y_range = ymax - ymin
bx = np.linspace(xmin, xmax, int(x_range) + 1)
by = np.linspace(ymin, ymax, int(y_range) + 1)
newt = np.array([], dtype=np.float64)
newi = np.array([], dtype=int)
newx = np.array([], dtype=np.float64)
newy = np.array([], dtype=np.float64)
newc = np.array([], dtype=np.float64)
newe = np.array([], dtype=np.float64)
for i in list(set(idx)):
good = idx == i
x_filt = x[good]
n = len(x_filt)
if n == 0:
continue
y_filt = y[good]
c_filt = c[good]
t_filt = t[good]
t_filt -= t_filt[0]
hists, _, _ = \
histogram2d(x_filt, y_filt, bins=(bx, by),
weights=[np.ones(n), t_filt, x_filt, y_filt, c_filt])
expo, time, X, Y, counts = hists
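        # The histograms above hold per-bin sums; dividing by the exposure
        # (number of hits per bin) below converts them to per-bin averages.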
good = expo > 0
goodexpo = expo[good]
tdum = np.ndarray.flatten(time[good] / goodexpo)
cdum = np.ndarray.flatten(counts[good] / goodexpo)
idum = np.ndarray.flatten(i + np.zeros(len(goodexpo), dtype=int))
xdum = np.ndarray.flatten(X[good] / goodexpo)
ydum = np.ndarray.flatten(Y[good] / goodexpo)
edum = np.ndarray.flatten(goodexpo)
newt = np.append(newt, tdum)
newc = | np.append(newc, cdum) | numpy.append |
# Copyright (c) 2022 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
from __future__ import annotations
import sys
from os import path
import numpy as np
from dask import delayed
from dask.distributed import Client, get_client
from geoh5py.groups import ContainerGroup
from geoh5py.objects import Curve, Points
from geoh5py.ui_json import InputFile
from tqdm import tqdm
from geoapps.base.application import BaseApplication
from geoapps.utils import geophysical_systems
from geoapps.utils.formatters import string_name
from geoapps.utils.utils import hex_to_rgb
from .params import PeakFinderParams
from .utils import default_groups_from_property_group, find_anomalies
class PeakFinderDriver:
def __init__(self, params: PeakFinderParams):
self.params: PeakFinderParams = params
def run(self, output_group=None):
print("Reading parameters...")
try:
client = get_client()
except ValueError:
client = Client()
workspace = self.params.geoh5
survey = self.params.objects
prop_group = [pg for pg in survey.property_groups if pg.uid == self.params.data]
if self.params.tem_checkbox:
system = geophysical_systems.parameters()[self.params.system]
normalization = system["normalization"]
else:
normalization = [1]
if output_group is None:
output_group = ContainerGroup.create(
workspace, name=string_name(self.params.ga_group_name)
)
line_field = self.params.line_field
lines = np.unique(line_field.values)
if self.params.group_auto and any(prop_group):
channel_groups = default_groups_from_property_group(prop_group[0])
else:
channel_groups = self.params.groups_from_free_params()
active_channels = {}
for group in channel_groups.values():
for channel in group["properties"]:
obj = workspace.get_entity(channel)[0]
active_channels[channel] = {"name": obj.name}
for uid, channel_params in active_channels.items():
obj = workspace.get_entity(uid)[0]
if self.params.tem_checkbox:
channel = [ch for ch in system["channels"].keys() if ch in obj.name]
if any(channel):
channel_params["time"] = system["channels"][channel[0]]
else:
continue
channel_params["values"] = client.scatter(
obj.values.copy() * (-1.0) ** self.params.flip_sign
)
print("Submitting parallel jobs:")
anomalies = []
locations = client.scatter(survey.vertices.copy())
for line_id in tqdm(list(lines)):
line_indices = np.where(line_field.values == line_id)[0]
anomalies += [
client.compute(
delayed(find_anomalies)(
locations,
line_indices,
active_channels,
channel_groups,
data_normalization=normalization,
smoothing=self.params.smoothing,
min_amplitude=self.params.min_amplitude,
min_value=self.params.min_value,
min_width=self.params.min_width,
max_migration=self.params.max_migration,
min_channels=self.params.min_channels,
minimal_output=True,
)
)
]
(
channel_group,
tau,
migration,
azimuth,
cox,
amplitude,
inflx_up,
inflx_dwn,
start,
end,
skew,
peaks,
) = ([], [], [], [], [], [], [], [], [], [], [], [])
print("Processing and collecting results:")
for future_line in tqdm(anomalies):
line = future_line.result()
for group in line:
if "channel_group" in group.keys() and len(group["cox"]) > 0:
channel_group += group["channel_group"]["label"]
if group["linear_fit"] is None:
tau += [0]
else:
tau += [np.abs(group["linear_fit"][0] ** -1.0)]
migration += [group["migration"]]
amplitude += [group["amplitude"]]
azimuth += [group["azimuth"]]
cox += [group["cox"]]
inflx_dwn += [group["inflx_dwn"]]
inflx_up += [group["inflx_up"]]
start += [group["start"]]
end += [group["end"]]
skew += [group["skew"]]
peaks += [group["peaks"]]
print("Exporting...")
if cox:
            channel_group = np.hstack(channel_group)
            # Create reference values and color_map; reference values start at 1
group_map, color_map = {}, []
for ind, (name, group) in enumerate(channel_groups.items()):
group_map[ind + 1] = name
color_map += [[ind + 1] + hex_to_rgb(group["color"]) + [1]]
color_map = np.core.records.fromarrays(
np.vstack(color_map).T, names=["Value", "Red", "Green", "Blue", "Alpha"]
)
points = Points.create(
self.params.geoh5,
name="PointMarkers",
vertices=np.vstack(cox),
parent=output_group,
)
points.entity_type.name = self.params.ga_group_name
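            # Normalized migration -> apparent dip: arccos maps zero migration
            # to a 90-degree dip and the maximum migration to 0 degrees.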
migration = np.hstack(migration)
dip = migration / migration.max()
dip = np.rad2deg(np.arccos(dip))
skew = np.hstack(skew)
azimuth = np.hstack(azimuth)
points.add_data(
{
"amplitude": {"values": np.hstack(amplitude)},
"skew": {"values": skew},
}
)
if self.params.tem_checkbox:
points.add_data(
{
"tau": {"values": np.hstack(tau)},
"azimuth": {"values": azimuth},
"dip": {"values": dip},
}
)
channel_group_data = points.add_data(
{
"channel_group": {
"type": "referenced",
"values": np.hstack(channel_group),
"value_map": group_map,
}
}
)
channel_group_data.entity_type.color_map = {
"name": "Time Groups",
"values": color_map,
}
if self.params.tem_checkbox:
group = points.find_or_create_property_group(
name="AzmDip", property_group_type="Dip direction & dip"
)
group.properties = [
points.get_data("azimuth")[0].uid,
points.get_data("dip")[0].uid,
]
# Add structural markers
if self.params.structural_markers:
if self.params.tem_checkbox:
markers = []
def rotation_2D(angle):
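                        # 2-D rotation matrix for an angle given in degrees
                        # (counter-clockwise for positive angles).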
R = np.r_[
np.c_[
np.cos(np.pi * angle / 180),
-np.sin(np.pi * angle / 180),
],
np.c_[
np.sin(np.pi * angle / 180), np.cos(np.pi * angle / 180)
],
]
return R
for azm, xyz, mig in zip(
np.hstack(azimuth).tolist(),
np.vstack(cox).tolist(),
migration.tolist(),
):
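                        # Four vertices per marker: a 50-unit crossbar centred
                        # on the anomaly plus a segment of length `mig` along
                        # the dip direction, rotated by the anomaly azimuth.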
marker = np.r_[
np.c_[-0.5, 0.0] * 50,
np.c_[0.5, 0] * 50,
np.c_[0.0, 0.0],
np.c_[0.0, 1.0] * mig,
]
marker = (
np.c_[np.dot(rotation_2D(-azm), marker.T).T, np.zeros(4)]
+ xyz
)
markers.append(marker.squeeze())
curves = Curve.create(
self.params.geoh5,
name="TickMarkers",
vertices=np.vstack(markers),
cells=np.arange(len(markers) * 4, dtype="uint32").reshape(
(-1, 2)
),
parent=output_group,
)
channel_group_data = curves.add_data(
{
"channel_group": {
"type": "referenced",
"values": np.kron(np.hstack(channel_group), np.ones(4)),
"value_map": group_map,
}
}
)
channel_group_data.entity_type.color_map = {
"name": "Time Groups",
"values": color_map,
}
inflx_pts = Points.create(
self.params.geoh5,
name="Inflections_Up",
vertices= | np.vstack(inflx_up) | numpy.vstack |